diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index d3ee6153c..eadf3a474 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/restic/restic",
-	"GoVersion": "go1.4.2",
+	"GoVersion": "go1.5.1",
 	"Packages": [
 		"./..."
 	],
@@ -9,6 +9,14 @@
 			"ImportPath": "bazil.org/fuse",
 			"Rev": "18419ee53958df28fcfc9490fe6123bd59e237bb"
 		},
+		{
+			"ImportPath": "github.com/bradfitz/http2",
+			"Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/proto",
+			"Rev": "535a10468679b4cf155f6a7afdf53b554633fc09"
+		},
 		{
 			"ImportPath": "github.com/jessevdk/go-flags",
 			"Comment": "v1-297-g1b89bf7",
@@ -30,6 +38,10 @@
 			"ImportPath": "github.com/restic/chunker",
 			"Rev": "fc45043175c38d59374024a38fb7123c40a64f20"
 		},
+		{
+			"ImportPath": "github.com/twinj/uuid",
+			"Rev": "70cac2bcd273ef6a371bb96cde363d28b68734c3"
+		},
 		{
 			"ImportPath": "golang.org/x/crypto/pbkdf2",
 			"Rev": "cc04154d65fb9296747569b107cfd05380b1ea3e"
@@ -50,6 +62,35 @@
 			"ImportPath": "golang.org/x/net/context",
 			"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
 		},
+		{
+			"ImportPath": "golang.org/x/net/internal/timeseries",
+			"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
+		},
+		{
+			"ImportPath": "golang.org/x/net/trace",
+			"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
+		},
+		{
+			"ImportPath": "golang.org/x/oauth2",
+			"Rev": "ad0128250e8fba646a92ca9129716e523d63ef9f"
+		},
+		{
+			"ImportPath": "google.golang.org/api/googleapi",
+			"Rev": "8cbf97d5e925c857511aedf459affeed490e6861"
+		},
+		{
+			"ImportPath": "google.golang.org/api/storage/v1",
+			"Rev": "8cbf97d5e925c857511aedf459affeed490e6861"
+		},
+		{
+			"ImportPath": "google.golang.org/cloud",
+			"Comment": "cloud.mailed",
+			"Rev": "56b945c78d7f2b5bfb33df2cea5275e92e8b9084"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc",
+			"Rev": "5feca5d7a525c9432cb0ffbcac62f61ea1e08ba3"
+		},
 		{
 			"ImportPath": "gopkg.in/amz.v3/aws",
 			"Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676"
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore b/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
new file mode 100644
index 000000000..190f12234
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS b/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
new file mode 100644
index 000000000..973453e3f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
@@ -0,0 +1,20 @@
+# This file is like Go's AUTHORS file: it lists Copyright holders.
+# The list of humans who have contributed is in the CONTRIBUTORS file.
+#
+# To contribute to this project, because it will eventually be folded
+# back in to Go itself, you need to submit a CLA:
+#
+#	http://golang.org/doc/contribute.html#copyright
+#
+# Then you get added to CONTRIBUTORS and you or your company get added
+# to the AUTHORS file.
+
+Blake Mizerany github=bmizerany
+Daniel Morsing github=DanielMorsing
+Gabriel Aszalos github=gbbr
+Google, Inc.
+Keith Rarick github=kr
+Matthew Keenan github=mattkeenan
+Matt Layher github=mdlayher
+Perry Abbott github=pabbott0
+Tatsuhiro Tsujikawa github=tatsuhiro-t
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS b/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
new file mode 100644
index 000000000..f0128c18c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
@@ -0,0 +1,20 @@
+# This file is like Go's CONTRIBUTORS file: it lists humans.
+# The list of copyright holders (which may be companies) is in the AUTHORS file.
+#
+# To contribute to this project, because it will eventually be folded
+# back in to Go itself, you need to submit a CLA:
+#
+#	http://golang.org/doc/contribute.html#copyright
+#
+# Then you get added to CONTRIBUTORS and you or your company get added
+# to the AUTHORS file.
+
+Blake Mizerany github=bmizerany
+Brad Fitzpatrick github=bradfitz
+Daniel Morsing github=DanielMorsing
+Gabriel Aszalos github=gbbr
+Keith Rarick github=kr
+Matthew Keenan github=mattkeenan
+Matt Layher github=mdlayher
+Perry Abbott github=pabbott0
+Tatsuhiro Tsujikawa github=tatsuhiro-t
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile b/Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile
new file mode 100644
index 000000000..b4e14d55a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile
@@ -0,0 +1,44 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image is found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+    apt-get upgrade -y && \
+    apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+    autotools-dev libtool pkg-config zlib1g-dev \
+    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+    automake autoconf
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER af24f8394e43f4
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz
+RUN tar -zxvf curl-7.40.0.tar.gz
+WORKDIR /root/curl-7.40.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING b/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
new file mode 100644
index 000000000..69aafe4d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
@@ -0,0 +1,5 @@
+We only accept contributions from users who have gone through Go's
+contribution process (signed a CLA).
+
+Please acknowledge whether you have (and use the same email) if
+sending a pull request.
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE b/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
new file mode 100644
index 000000000..2dc6853ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
@@ -0,0 +1,7 @@
+Copyright 2014 Google & the Go AUTHORS
+
+Go AUTHORS are:
+See https://code.google.com/p/go/source/browse/AUTHORS
+
+Licensed under the terms of Go itself:
+https://code.google.com/p/go/source/browse/LICENSE
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/Makefile b/Godeps/_workspace/src/github.com/bradfitz/http2/Makefile
new file mode 100644
index 000000000..55fd826f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+	docker build -t gohttp2/curl .
+
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/README b/Godeps/_workspace/src/github.com/bradfitz/http2/README
new file mode 100644
index 000000000..16cb516a6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/README
@@ -0,0 +1,17 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+  but are being worked on.
+* The client work has just started but, since it shares a lot of code
+  with the server, is coming along much quicker.
+
+Docs are at https://godoc.org/github.com/bradfitz/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome.
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go b/Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go
new file mode 100644
index 000000000..c43954cf0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"errors"
+)
+
+// buffer is an io.ReadWriteCloser backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+type buffer struct {
+	buf    []byte
+	r, w   int
+	closed bool
+	err    error // err to return to reader
+}
+
+var (
+	errReadEmpty   = errors.New("read from empty buffer")
+	errWriteClosed = errors.New("write on closed buffer")
+	errWriteFull   = errors.New("write on full buffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *buffer) Read(p []byte) (n int, err error) {
+	n = copy(p, b.buf[b.r:b.w])
+	b.r += n
+	if b.closed && b.r == b.w {
+		err = b.err
+	} else if b.r == b.w && n == 0 {
+		err = errReadEmpty
+	}
+	return n, err
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *buffer) Len() int {
+	return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	if b.closed {
+		return 0, errWriteClosed
+	}
+
+	// Slide existing data to beginning.
+	if b.r > 0 && len(p) > len(b.buf)-b.w {
+		copy(b.buf, b.buf[b.r:b.w])
+		b.w -= b.r
+		b.r = 0
+	}
+
+	// Write new data.
+	n = copy(b.buf[b.w:], p)
+	b.w += n
+	if n < len(p) {
+		err = errWriteFull
+	}
+	return n, err
+}
+
+// Close marks the buffer as closed. Future calls to Write will
+// return an error.
Future calls to Read, once the buffer is +// empty, will return err. +func (b *buffer) Close(err error) { + if !b.closed { + b.closed = true + b.err = err + } +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go new file mode 100644 index 000000000..ea76905b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go @@ -0,0 +1,154 @@ +// Copyright 2014 The Go Authors. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import ( + "io" + "reflect" + "testing" +) + +var bufferReadTests = []struct { + buf buffer + read, wn int + werr error + wp []byte + wbuf buffer +}{ + { + buffer{[]byte{'a', 0}, 0, 1, false, nil}, + 5, 1, nil, []byte{'a'}, + buffer{[]byte{'a', 0}, 1, 1, false, nil}, + }, + { + buffer{[]byte{'a', 0}, 0, 1, true, io.EOF}, + 5, 1, io.EOF, []byte{'a'}, + buffer{[]byte{'a', 0}, 1, 1, true, io.EOF}, + }, + { + buffer{[]byte{0, 'a'}, 1, 2, false, nil}, + 5, 1, nil, []byte{'a'}, + buffer{[]byte{0, 'a'}, 2, 2, false, nil}, + }, + { + buffer{[]byte{0, 'a'}, 1, 2, true, io.EOF}, + 5, 1, io.EOF, []byte{'a'}, + buffer{[]byte{0, 'a'}, 2, 2, true, io.EOF}, + }, + { + buffer{[]byte{}, 0, 0, false, nil}, + 5, 0, errReadEmpty, []byte{}, + buffer{[]byte{}, 0, 0, false, nil}, + }, + { + buffer{[]byte{}, 0, 0, true, io.EOF}, + 5, 0, io.EOF, []byte{}, + buffer{[]byte{}, 0, 0, true, io.EOF}, + }, +} + +func TestBufferRead(t *testing.T) { + for i, tt := range bufferReadTests { + read := make([]byte, tt.read) + n, err := tt.buf.Read(read) + if n != tt.wn { + t.Errorf("#%d: wn = %d want %d", i, n, tt.wn) + continue + } + if err != tt.werr { + t.Errorf("#%d: werr = %v want %v", i, err, tt.werr) + continue + } + read = read[:n] + if !reflect.DeepEqual(read, tt.wp) { + t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp) + } + if !reflect.DeepEqual(tt.buf, tt.wbuf) { + t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf) + } + } +} + +var bufferWriteTests = []struct { + buf buffer + write, wn int + werr error + wbuf buffer +}{ + { + buf: buffer{ + buf: []byte{}, + }, + wbuf: buffer{ + buf: []byte{}, + }, + }, + { + buf: buffer{ + buf: []byte{1, 'a'}, + }, + write: 1, + wn: 1, + wbuf: buffer{ + buf: []byte{0, 'a'}, + w: 1, + }, + }, + { + buf: buffer{ + buf: []byte{'a', 1}, + r: 1, + w: 1, + }, + write: 2, + wn: 2, + wbuf: buffer{ + buf: []byte{0, 0}, + w: 2, + }, + }, + { + buf: buffer{ + buf: []byte{}, + r: 1, + closed: true, + }, + write: 5, + werr: errWriteClosed, + wbuf: buffer{ + buf: []byte{}, + r: 1, + closed: true, + }, + }, + { + buf: buffer{ + buf: []byte{}, + }, + write: 5, + werr: errWriteFull, + wbuf: buffer{ + buf: []byte{}, + }, + }, +} + +func TestBufferWrite(t *testing.T) { + for i, tt := range bufferWriteTests { + n, err := tt.buf.Write(make([]byte, tt.write)) + if n != tt.wn { + t.Errorf("#%d: wrote %d bytes; want %d", i, n, tt.wn) + continue + } + if err != tt.werr { + t.Errorf("#%d: error = %v; want %v", i, err, tt.werr) + continue + } + if !reflect.DeepEqual(tt.buf, tt.wbuf) { + t.Errorf("#%d: buf = %+v; want %+v", i, tt.buf, tt.wbuf) + } + } +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/errors.go b/Godeps/_workspace/src/github.com/bradfitz/http2/errors.go new file mode 100644 index 000000000..c885328a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/errors.go @@ -0,0 +1,78 @@ +// Copyright 
2014 The Go Authors. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import "fmt" + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. +type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." +type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go new file mode 100644 index 000000000..86d52ee65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go @@ -0,0 +1,27 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import "testing" + +func TestErrCodeString(t *testing.T) { + tests := []struct { + err ErrCode + want string + }{ + {ErrCodeProtocol, "PROTOCOL_ERROR"}, + {0xd, "HTTP_1_1_REQUIRED"}, + {0xf, "unknown error code 0xf"}, + } + for i, tt := range tests { + got := tt.err.String() + if got != tt.want { + t.Errorf("%d. 
Error = %q; want %q", i, got, tt.want) + } + } +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/flow.go b/Godeps/_workspace/src/github.com/bradfitz/http2/flow.go new file mode 100644 index 000000000..540fc4283 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/flow.go @@ -0,0 +1,51 @@ +// Copyright 2014 The Go Authors. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *flow) add(n int32) bool { + remain := (1<<31 - 1) - f.n + if n > remain { + return false + } + f.n += n + return true +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go new file mode 100644 index 000000000..dbb656f8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import "testing" + +func TestFlow(t *testing.T) { + var st flow + var conn flow + st.add(3) + conn.add(2) + + if got, want := st.available(), int32(3); got != want { + t.Errorf("available = %d; want %d", got, want) + } + st.setConnFlow(&conn) + if got, want := st.available(), int32(2); got != want { + t.Errorf("after parent setup, available = %d; want %d", got, want) + } + + st.take(2) + if got, want := conn.available(), int32(0); got != want { + t.Errorf("after taking 2, conn = %d; want %d", got, want) + } + if got, want := st.available(), int32(0); got != want { + t.Errorf("after taking 2, stream = %d; want %d", got, want) + } +} + +func TestFlowAdd(t *testing.T) { + var f flow + if !f.add(1) { + t.Fatal("failed to add 1") + } + if !f.add(-1) { + t.Fatal("failed to add -1") + } + if got, want := f.available(), int32(0); got != want { + t.Fatalf("size = %d; want %d", got, want) + } + if !f.add(1<<31 - 1) { + t.Fatal("failed to add 2^31-1") + } + if got, want := f.available(), int32(1<<31-1); got != want { + t.Fatalf("size = %d; want %d", got, want) + } + if f.add(1) { + t.Fatal("adding 1 to max shouldn't be allowed") + } + +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/frame.go b/Godeps/_workspace/src/github.com/bradfitz/http2/frame.go new file mode 100644 index 000000000..e8b872a19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/frame.go @@ -0,0 +1,1113 @@ +// Copyright 2014 The Go Authors. 
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. +const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. 
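+// The header is followed by a payload of Length bytes.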
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+	valid bool // caller can access []byte fields in the Frame
+
+	// Type is the 1 byte frame type. There are ten standard frame
+	// types, but extension frame types may be written by WriteRawFrame
+	// and will be returned by ReadFrame (as UnknownFrame).
+	Type FrameType
+
+	// Flags are the 1 byte of 8 potential bit flags per frame.
+	// They are specific to the frame type.
+	Flags Flags
+
+	// Length is the length of the frame, not including the 9 byte header.
+	// The maximum size is one byte less than 16MB (uint24), but only
+	// frames up to 16KB are allowed without peer agreement.
+	Length uint32
+
+	// StreamID is which stream this frame is for. Certain frames
+	// are not stream-specific, in which case this field is 0.
+	StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+	var buf bytes.Buffer
+	buf.WriteString("[FrameHeader ")
+	buf.WriteString(h.Type.String())
+	if h.Flags != 0 {
+		buf.WriteString(" flags=")
+		set := 0
+		for i := uint8(0); i < 8; i++ {
+			if h.Flags&(1<<i) == 0 {
+				continue
+			}
+			set++
+			if set > 1 {
+				buf.WriteByte('|')
+			}
+			name := flagName[h.Type][Flags(1<<i)]
+			if name != "" {
+				buf.WriteString(name)
+			} else {
+				fmt.Fprintf(&buf, "0x%x", 1<<i)
+			}
+		}
+	}
+	if h.StreamID != 0 {
+		fmt.Fprintf(&buf, " stream=%d", h.StreamID)
+	}
+	fmt.Fprintf(&buf, " len=%d", h.Length)
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+func (h *FrameHeader) checkValid() {
+	if !h.valid {
+		panic("Frame accessor called on non-owned Frame")
+	}
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, frameHeaderLen)
+		return &buf
+	},
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+	bufp := fhBytes.Get().(*[]byte)
+	defer fhBytes.Put(bufp)
+	return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+	_, err := io.ReadFull(r, buf[:frameHeaderLen])
+	if err != nil {
+		return FrameHeader{}, err
+	}
+	return FrameHeader{
+		Length:   (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+		Type:     FrameType(buf[3]),
+		Flags:    Flags(buf[4]),
+		StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+		valid:    true,
+	}, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+	Header() FrameHeader
+
+	// invalidate is called by Framer.ReadFrame to make this
+	// frame's buffers as being invalid, since the subsequent
+	// frame will reuse them.
+	invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+	r         io.Reader
+	lastFrame Frame // or nil
+
+	maxReadSize uint32
+	headerBuf   [frameHeaderLen]byte
+
+	// TODO: let getReadBuf be configurable, and use a less memory-pinning
+	// allocator in server.go to minimize memory pinned for many idle conns.
+	// Will probably also need to make frame invalidation have a hook too.
+	getReadBuf func(size uint32) []byte
+	readBuf    []byte // cache for default getReadBuf
+
+	maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+	w    io.Writer
+	wbuf []byte
+
+	// AllowIllegalWrites permits the Framer's Write methods to
+	// write frames that do not conform to the HTTP/2 spec. This
+	// permits using the Framer to test other HTTP/2
+	// implementations' conformance to the spec.
+	// If false, the Write methods will prefer to return an error
+	// rather than comply.
+	AllowIllegalWrites bool
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+	// Write the FrameHeader.
+	f.wbuf = append(f.wbuf[:0],
+		0, // 3 bytes of length, filled in in endWrite
+		0,
+		0,
+		byte(ftype),
+		byte(flags),
+		byte(streamID>>24),
+		byte(streamID>>16),
+		byte(streamID>>8),
+		byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+	// Now that we know the final size, fill in the FrameHeader in
+	// the space previously reserved for it. Abuse append.
+	length := len(f.wbuf) - frameHeaderLen
+	if length >= (1 << 24) {
+		return ErrFrameTooLarge
+	}
+	_ = append(f.wbuf[:0],
+		byte(length>>16),
+		byte(length>>8),
+		byte(length))
+	n, err := f.w.Write(f.wbuf)
+	if err == nil && n != len(f.wbuf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+func (f *Framer) writeByte(v byte)     { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte)  { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+	f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+	minMaxFrameSize = 1 << 14
+	maxFrameSize    = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+	fr := &Framer{
+		w: w,
+		r: r,
+	}
+	fr.getReadBuf = func(size uint32) []byte {
+		if cap(fr.readBuf) >= int(size) {
+			return fr.readBuf[:size]
+		}
+		fr.readBuf = make([]byte, size)
+		return fr.readBuf
+	}
+	fr.SetMaxReadFrameSize(maxFrameSize)
+	return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+	if v > maxFrameSize {
+		v = maxFrameSize
+	}
+	fr.maxReadSize = v
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+// If the frame is larger than previously set with SetMaxReadFrameSize,
+// the returned error is ErrFrameTooLarge.
+func (fr *Framer) ReadFrame() (Frame, error) { + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fh, payload) + if err != nil { + return nil, err + } + fr.lastFrame = f + return f, nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + f := &DataFrame{ + FrameHeader: fh, + } + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, ConnectionError(ErrCodeProtocol) + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var errStreamID = errors.New("invalid streamid") + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + // TODO: ignoring padding for now. will add when somebody cares. + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + f.startWrite(FrameData, flags, streamID) + f.wbuf = append(f.wbuf, data...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. 
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID != 0 {
+		// SETTINGS frames always apply to a connection,
+		// never a single stream. The stream identifier for a
+		// SETTINGS frame MUST be zero (0x0). If an endpoint
+		// receives a SETTINGS frame whose stream identifier
+		// field is anything other than 0x0, the endpoint MUST
+		// respond with a connection error (Section 5.4.1) of
+		// type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(p)%6 != 0 {
+		// Expecting even number of 6 byte settings.
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	f := &SettingsFrame{FrameHeader: fh, p: p}
+	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+		// Values above the maximum flow control window size of 2^31 - 1 MUST
+		// be treated as a connection error (Section 5.4.1) of type
+		// FLOW_CONTROL_ERROR.
+		return nil, ConnectionError(ErrCodeFlowControl)
+	}
+	return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+	return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+	f.checkValid()
+	buf := f.p
+	for len(buf) > 0 {
+		settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+		if settingID == s {
+			return binary.BigEndian.Uint32(buf[2:6]), true
+		}
+		buf = buf[6:]
+	}
+	return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+	f.checkValid()
+	buf := f.p
+	for len(buf) > 0 {
+		if err := fn(Setting{
+			SettingID(binary.BigEndian.Uint16(buf[:2])),
+			binary.BigEndian.Uint32(buf[2:6]),
+		}); err != nil {
+			return err
+		}
+		buf = buf[6:]
+	}
+	return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+	f.startWrite(FrameSettings, 0, 0)
+	for _, s := range settings {
+		f.writeUint16(uint16(s.ID))
+		f.writeUint32(s.Val)
+	}
+	return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+	f.startWrite(FrameSettings, FlagSettingsAck, 0)
+	return f.endWrite()
+}
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+	FrameHeader
+	Data [8]byte
+}
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if len(payload) != 8 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID != 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	f := &PingFrame{FrameHeader: fh}
+	copy(f.Data[:], payload)
+	return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+	var flags Flags
+	if ack {
+		flags = FlagPingAck
+	}
+	f.startWrite(FramePing, flags, 0)
+	f.writeBytes(data[:])
+	return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
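+// It carries the ID of the last stream the sender processed and an error
+// code explaining why the connection is shutting down.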
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+	FrameHeader
+	LastStreamID uint32
+	ErrCode      ErrCode
+	debugData    []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+	f.checkValid()
+	return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if fh.StreamID != 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(p) < 8 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	return &GoAwayFrame{
+		FrameHeader:  fh,
+		LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+		ErrCode:      ErrCode(binary.BigEndian.Uint32(p[4:8])),
+		debugData:    p[8:],
+	}, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+	f.startWrite(FrameGoAway, 0, 0)
+	f.writeUint32(maxStreamID & (1<<31 - 1))
+	f.writeUint32(uint32(code))
+	f.writeBytes(debugData)
+	return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+	FrameHeader
+	p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+	f.checkValid()
+	return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+	return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+	FrameHeader
+	Increment uint32
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if len(p) != 4 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+	if inc == 0 {
+		// A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+		// increment of 0 as a stream error (Section 5.4.2) of
+		// type PROTOCOL_ERROR; errors on the connection flow
+		// control window MUST be treated as a connection
+		// error (Section 5.4.1).
+		if fh.StreamID == 0 {
+			return nil, ConnectionError(ErrCodeProtocol)
+		}
+		return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+	}
+	return &WindowUpdateFrame{
+		FrameHeader: fh,
+		Increment:   inc,
+	}, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+	// "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+	if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+		return errors.New("illegal window increment value")
+	}
+	f.startWrite(FrameWindowUpdate, 0, streamID)
+	f.writeUint32(incr)
+	return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+	FrameHeader
+
+	// Priority is set if FlagHeadersPriority is set in the FrameHeader.
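+	// It is the zero PriorityParam otherwise.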
+ Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, StreamError{fh.StreamID, ErrCodeProtocol} + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. +// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamID(v) && !f.AllowIllegalWrites { + return errors.New("invalid dependent stream id") + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) 
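+	// Append PadLength zero bytes, matching the pad-length byte written above.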
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+	FrameHeader
+	PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+	// StreamDep is a 31-bit stream identifier for the
+	// stream that this stream depends on. Zero means no
+	// dependency.
+	StreamDep uint32
+
+	// Exclusive is whether the dependency is exclusive.
+	Exclusive bool
+
+	// Weight is the stream's zero-indexed weight. It should be
+	// set together with StreamDep, or neither should be set. Per
+	// the spec, "Add one to the value to obtain a weight between
+	// 1 and 256."
+	Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+	return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if fh.StreamID == 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(payload) != 5 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	v := binary.BigEndian.Uint32(payload[:4])
+	streamID := v & 0x7fffffff // mask off high bit
+	return &PriorityFrame{
+		FrameHeader: fh,
+		PriorityParam: PriorityParam{
+			Weight:    payload[4],
+			StreamDep: streamID,
+			Exclusive: streamID != v, // was high bit set?
+		},
+	}, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.startWrite(FramePriority, 0, streamID)
+	v := p.StreamDep
+	if p.Exclusive {
+		v |= 1 << 31
+	}
+	f.writeUint32(v)
+	f.writeByte(p.Weight)
+	return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+	FrameHeader
+	ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if len(p) != 4 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID == 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.startWrite(FrameRSTStream, 0, streamID)
+	f.writeUint32(uint32(code))
+	return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
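+// The only flag defined for CONTINUATION is END_HEADERS.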
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+	FrameHeader
+	headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+	return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if endHeaders {
+		flags |= FlagContinuationEndHeaders
+	}
+	f.startWrite(FrameContinuation, flags, streamID)
+	f.wbuf = append(f.wbuf, headerBlockFragment...)
+	return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+	FrameHeader
+	PromiseID     uint32
+	headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+	pp := &PushPromiseFrame{
+		FrameHeader: fh,
+	}
+	if pp.StreamID == 0 {
+		// PUSH_PROMISE frames MUST be associated with an existing,
+		// peer-initiated stream. The stream identifier of a
+		// PUSH_PROMISE frame indicates the stream it is associated
+		// with. If the stream identifier field specifies the value
+		// 0x0, a recipient MUST respond with a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	// The PUSH_PROMISE frame includes optional padding.
+	// Padding fields and flags are identical to those defined for DATA frames.
+	var padLength uint8
+	if fh.Flags.Has(FlagPushPromisePadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+
+	p, pp.PromiseID, err = readUint32(p)
+	if err != nil {
+		return
+	}
+	pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+	if int(padLength) > len(p) {
+		// like the DATA frame, error out if padding is longer than the body.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	pp.headerFragBuf = p[:len(p)-int(padLength)]
+	return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+
+	// PromiseID is the required Stream ID that this
+	// Push Promise promises.
+	PromiseID uint32
+
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with Header Frames, this is the low level call for writing
+// individual frames.
Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { + return errStreamID + } + f.writeUint32(p.PromiseID) + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. +func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go new file mode 100644 index 000000000..20027c5f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go @@ -0,0 +1,597 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "reflect" + "strings" + "testing" + "unsafe" +) + +func testFramer() (*Framer, *bytes.Buffer) { + buf := new(bytes.Buffer) + return NewFramer(buf, buf), buf +} + +func TestFrameSizes(t *testing.T) { + // Catch people rearranging the FrameHeader fields. + if got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want { + t.Errorf("FrameHeader size = %d; want %d", got, want) + } +} + +func TestFrameTypeString(t *testing.T) { + tests := []struct { + ft FrameType + want string + }{ + {FrameData, "DATA"}, + {FramePing, "PING"}, + {FrameGoAway, "GOAWAY"}, + {0xf, "UNKNOWN_FRAME_TYPE_15"}, + } + + for i, tt := range tests { + got := tt.ft.String() + if got != tt.want { + t.Errorf("%d. 
String(FrameType %d) = %q; want %q", i, int(tt.ft), got, tt.want) + } + } +} + +func TestWriteRST(t *testing.T) { + fr, buf := testFramer() + var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4 + var errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4 + fr.WriteRSTStream(streamID, ErrCode(errCode)) + const wantEnc = "\x00\x00\x04\x03\x00\x01\x02\x03\x04\x07\x06\x05\x04" + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + want := &RSTStreamFrame{ + FrameHeader: FrameHeader{ + valid: true, + Type: 0x3, + Flags: 0x0, + Length: 0x4, + StreamID: 0x1020304, + }, + ErrCode: 0x7060504, + } + if !reflect.DeepEqual(f, want) { + t.Errorf("parsed back %#v; want %#v", f, want) + } +} + +func TestWriteData(t *testing.T) { + fr, buf := testFramer() + var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4 + data := []byte("ABC") + fr.WriteData(streamID, true, data) + const wantEnc = "\x00\x00\x03\x00\x01\x01\x02\x03\x04ABC" + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + df, ok := f.(*DataFrame) + if !ok { + t.Fatalf("got %T; want *DataFrame", f) + } + if !bytes.Equal(df.Data(), data) { + t.Errorf("got %q; want %q", df.Data(), data) + } + if f.Header().Flags&1 == 0 { + t.Errorf("didn't see END_STREAM flag") + } +} + +func TestWriteHeaders(t *testing.T) { + tests := []struct { + name string + p HeadersFrameParam + wantEnc string + wantFrame *HeadersFrame + }{ + { + "basic", + HeadersFrameParam{ + StreamID: 42, + BlockFragment: []byte("abc"), + Priority: PriorityParam{}, + }, + "\x00\x00\x03\x01\x00\x00\x00\x00*abc", + &HeadersFrame{ + FrameHeader: FrameHeader{ + valid: true, + StreamID: 42, + Type: FrameHeaders, + Length: uint32(len("abc")), + }, + Priority: PriorityParam{}, + headerFragBuf: []byte("abc"), + }, + }, + { + "basic + end flags", + HeadersFrameParam{ + StreamID: 42, + BlockFragment: []byte("abc"), + EndStream: true, + EndHeaders: true, + Priority: PriorityParam{}, + }, + "\x00\x00\x03\x01\x05\x00\x00\x00*abc", + &HeadersFrame{ + FrameHeader: FrameHeader{ + valid: true, + StreamID: 42, + Type: FrameHeaders, + Flags: FlagHeadersEndStream | FlagHeadersEndHeaders, + Length: uint32(len("abc")), + }, + Priority: PriorityParam{}, + headerFragBuf: []byte("abc"), + }, + }, + { + "with padding", + HeadersFrameParam{ + StreamID: 42, + BlockFragment: []byte("abc"), + EndStream: true, + EndHeaders: true, + PadLength: 5, + Priority: PriorityParam{}, + }, + "\x00\x00\t\x01\r\x00\x00\x00*\x05abc\x00\x00\x00\x00\x00", + &HeadersFrame{ + FrameHeader: FrameHeader{ + valid: true, + StreamID: 42, + Type: FrameHeaders, + Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded, + Length: uint32(1 + len("abc") + 5), // pad length + contents + padding + }, + Priority: PriorityParam{}, + headerFragBuf: []byte("abc"), + }, + }, + { + "with priority", + HeadersFrameParam{ + StreamID: 42, + BlockFragment: []byte("abc"), + EndStream: true, + EndHeaders: true, + PadLength: 2, + Priority: PriorityParam{ + StreamDep: 15, + Exclusive: true, + Weight: 127, + }, + }, + "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x0f\u007fabc\x00\x00", + &HeadersFrame{ + FrameHeader: FrameHeader{ + valid: true, + StreamID: 42, + Type: FrameHeaders, + Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority, + Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding + }, 
+ Priority: PriorityParam{ + StreamDep: 15, + Exclusive: true, + Weight: 127, + }, + headerFragBuf: []byte("abc"), + }, + }, + } + for _, tt := range tests { + fr, buf := testFramer() + if err := fr.WriteHeaders(tt.p); err != nil { + t.Errorf("test %q: %v", tt.name, err) + continue + } + if buf.String() != tt.wantEnc { + t.Errorf("test %q: encoded %q; want %q", tt.name, buf.Bytes(), tt.wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Errorf("test %q: failed to read the frame back: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(f, tt.wantFrame) { + t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame) + } + } +} + +func TestWriteContinuation(t *testing.T) { + const streamID = 42 + tests := []struct { + name string + end bool + frag []byte + + wantFrame *ContinuationFrame + }{ + { + "not end", + false, + []byte("abc"), + &ContinuationFrame{ + FrameHeader: FrameHeader{ + valid: true, + StreamID: streamID, + Type: FrameContinuation, + Length: uint32(len("abc")), + }, + headerFragBuf: []byte("abc"), + }, + }, + { + "end", + true, + []byte("def"), + &ContinuationFrame{ + FrameHeader: FrameHeader{ + valid: true, + StreamID: streamID, + Type: FrameContinuation, + Flags: FlagContinuationEndHeaders, + Length: uint32(len("def")), + }, + headerFragBuf: []byte("def"), + }, + }, + } + for _, tt := range tests { + fr, _ := testFramer() + if err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil { + t.Errorf("test %q: %v", tt.name, err) + continue + } + f, err := fr.ReadFrame() + if err != nil { + t.Errorf("test %q: failed to read the frame back: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(f, tt.wantFrame) { + t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame) + } + } +} + +func TestWritePriority(t *testing.T) { + const streamID = 42 + tests := []struct { + name string + priority PriorityParam + wantFrame *PriorityFrame + }{ + { + "not exclusive", + PriorityParam{ + StreamDep: 2, + Exclusive: false, + Weight: 127, + }, + &PriorityFrame{ + FrameHeader{ + valid: true, + StreamID: streamID, + Type: FramePriority, + Length: 5, + }, + PriorityParam{ + StreamDep: 2, + Exclusive: false, + Weight: 127, + }, + }, + }, + + { + "exclusive", + PriorityParam{ + StreamDep: 3, + Exclusive: true, + Weight: 77, + }, + &PriorityFrame{ + FrameHeader{ + valid: true, + StreamID: streamID, + Type: FramePriority, + Length: 5, + }, + PriorityParam{ + StreamDep: 3, + Exclusive: true, + Weight: 77, + }, + }, + }, + } + for _, tt := range tests { + fr, _ := testFramer() + if err := fr.WritePriority(streamID, tt.priority); err != nil { + t.Errorf("test %q: %v", tt.name, err) + continue + } + f, err := fr.ReadFrame() + if err != nil { + t.Errorf("test %q: failed to read the frame back: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(f, tt.wantFrame) { + t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame) + } + } +} + +func TestWriteSettings(t *testing.T) { + fr, buf := testFramer() + settings := []Setting{{1, 2}, {3, 4}} + fr.WriteSettings(settings...) 
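+	// Expect a 12-byte payload: two settings, each a 16-bit ID plus a 32-bit value.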
+ const wantEnc = "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00\x00\x04" + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + t.Fatalf("Got a %T; want a SettingsFrame", f) + } + var got []Setting + sf.ForeachSetting(func(s Setting) error { + got = append(got, s) + valBack, ok := sf.Value(s.ID) + if !ok || valBack != s.Val { + t.Errorf("Value(%d) = %v, %v; want %v, true", s.ID, valBack, ok, s.Val) + } + return nil + }) + if !reflect.DeepEqual(settings, got) { + t.Errorf("Read settings %+v != written settings %+v", got, settings) + } +} + +func TestWriteSettingsAck(t *testing.T) { + fr, buf := testFramer() + fr.WriteSettingsAck() + const wantEnc = "\x00\x00\x00\x04\x01\x00\x00\x00\x00" + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } +} + +func TestWriteWindowUpdate(t *testing.T) { + fr, buf := testFramer() + const streamID = 1<<24 + 2<<16 + 3<<8 + 4 + const incr = 7<<24 + 6<<16 + 5<<8 + 4 + if err := fr.WriteWindowUpdate(streamID, incr); err != nil { + t.Fatal(err) + } + const wantEnc = "\x00\x00\x04\x08\x00\x01\x02\x03\x04\x07\x06\x05\x04" + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + want := &WindowUpdateFrame{ + FrameHeader: FrameHeader{ + valid: true, + Type: 0x8, + Flags: 0x0, + Length: 0x4, + StreamID: 0x1020304, + }, + Increment: 0x7060504, + } + if !reflect.DeepEqual(f, want) { + t.Errorf("parsed back %#v; want %#v", f, want) + } +} + +func TestWritePing(t *testing.T) { testWritePing(t, false) } +func TestWritePingAck(t *testing.T) { testWritePing(t, true) } + +func testWritePing(t *testing.T, ack bool) { + fr, buf := testFramer() + if err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + var wantFlags Flags + if ack { + wantFlags = FlagPingAck + } + var wantEnc = "\x00\x00\x08\x06" + string(wantFlags) + "\x00\x00\x00\x00" + "\x01\x02\x03\x04\x05\x06\x07\x08" + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + want := &PingFrame{ + FrameHeader: FrameHeader{ + valid: true, + Type: 0x6, + Flags: wantFlags, + Length: 0x8, + StreamID: 0, + }, + Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8}, + } + if !reflect.DeepEqual(f, want) { + t.Errorf("parsed back %#v; want %#v", f, want) + } +} + +func TestReadFrameHeader(t *testing.T) { + tests := []struct { + in string + want FrameHeader + }{ + {in: "\x00\x00\x00" + "\x00" + "\x00" + "\x00\x00\x00\x00", want: FrameHeader{}}, + {in: "\x01\x02\x03" + "\x04" + "\x05" + "\x06\x07\x08\x09", want: FrameHeader{ + Length: 66051, Type: 4, Flags: 5, StreamID: 101124105, + }}, + // Ignore high bit: + {in: "\xff\xff\xff" + "\xff" + "\xff" + "\xff\xff\xff\xff", want: FrameHeader{ + Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}}, + {in: "\xff\xff\xff" + "\xff" + "\xff" + "\x7f\xff\xff\xff", want: FrameHeader{ + Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}}, + } + for i, tt := range tests { + got, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in)) + if err != nil { + t.Errorf("%d. readFrameHeader(%q) = %v", i, tt.in, err) + continue + } + tt.want.valid = true + if got != tt.want { + t.Errorf("%d. 
readFrameHeader(%q) = %+v; want %+v", i, tt.in, got, tt.want) + } + } +} + +func TestReadWriteFrameHeader(t *testing.T) { + tests := []struct { + len uint32 + typ FrameType + flags Flags + streamID uint32 + }{ + {len: 0, typ: 255, flags: 1, streamID: 0}, + {len: 0, typ: 255, flags: 1, streamID: 1}, + {len: 0, typ: 255, flags: 1, streamID: 255}, + {len: 0, typ: 255, flags: 1, streamID: 256}, + {len: 0, typ: 255, flags: 1, streamID: 65535}, + {len: 0, typ: 255, flags: 1, streamID: 65536}, + + {len: 0, typ: 1, flags: 255, streamID: 1}, + {len: 255, typ: 1, flags: 255, streamID: 1}, + {len: 256, typ: 1, flags: 255, streamID: 1}, + {len: 65535, typ: 1, flags: 255, streamID: 1}, + {len: 65536, typ: 1, flags: 255, streamID: 1}, + {len: 16777215, typ: 1, flags: 255, streamID: 1}, + } + for _, tt := range tests { + fr, buf := testFramer() + fr.startWrite(tt.typ, tt.flags, tt.streamID) + fr.writeBytes(make([]byte, tt.len)) + fr.endWrite() + fh, err := ReadFrameHeader(buf) + if err != nil { + t.Errorf("ReadFrameHeader(%+v) = %v", tt, err) + continue + } + if fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID { + t.Errorf("ReadFrameHeader(%+v) = %+v; mismatch", tt, fh) + } + } + +} + +func TestWriteTooLargeFrame(t *testing.T) { + fr, _ := testFramer() + fr.startWrite(0, 1, 1) + fr.writeBytes(make([]byte, 1<<24)) + err := fr.endWrite() + if err != ErrFrameTooLarge { + t.Errorf("endWrite = %v; want errFrameTooLarge", err) + } +} + +func TestWriteGoAway(t *testing.T) { + const debug = "foo" + fr, buf := testFramer() + if err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil { + t.Fatal(err) + } + const wantEnc = "\x00\x00\v\a\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08" + debug + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + want := &GoAwayFrame{ + FrameHeader: FrameHeader{ + valid: true, + Type: 0x7, + Flags: 0, + Length: uint32(4 + 4 + len(debug)), + StreamID: 0, + }, + LastStreamID: 0x01020304, + ErrCode: 0x05060708, + debugData: []byte(debug), + } + if !reflect.DeepEqual(f, want) { + t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want) + } + if got := string(f.(*GoAwayFrame).DebugData()); got != debug { + t.Errorf("debug data = %q; want %q", got, debug) + } +} + +func TestWritePushPromise(t *testing.T) { + pp := PushPromiseParam{ + StreamID: 42, + PromiseID: 42, + BlockFragment: []byte("abc"), + } + fr, buf := testFramer() + if err := fr.WritePushPromise(pp); err != nil { + t.Fatal(err) + } + const wantEnc = "\x00\x00\x07\x05\x00\x00\x00\x00*\x00\x00\x00*abc" + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + _, ok := f.(*PushPromiseFrame) + if !ok { + t.Fatalf("got %T; want *PushPromiseFrame", f) + } + want := &PushPromiseFrame{ + FrameHeader: FrameHeader{ + valid: true, + Type: 0x5, + Flags: 0x0, + Length: 0x7, + StreamID: 42, + }, + PromiseID: 42, + headerFragBuf: []byte("abc"), + } + if !reflect.DeepEqual(f, want) { + t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want) + } +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go b/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go new file mode 100644 index 000000000..7dc2ef90d --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+	if !DebugGoroutines {
+		return 0
+	}
+	return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() != uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+func (g goroutineLock) checkNotOn() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() == uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+	bp := littleBuf.Get().(*[]byte)
+	defer littleBuf.Put(bp)
+	b := *bp
+	b = b[:runtime.Stack(b, false)]
+	// Parse the 4707 out of "goroutine 4707 ["
+	b = bytes.TrimPrefix(b, goroutineSpace)
+	i := bytes.IndexByte(b, ' ')
+	if i < 0 {
+		panic(fmt.Sprintf("No space found in %q", b))
+	}
+	b = b[:i]
+	n, err := parseUintBytes(b, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+	}
+	return n
+}
+
+var littleBuf = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, 64)
+		return &buf
+	},
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(strconv.IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = strconv.ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = strconv.ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
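The trick above is worth a note: Go deliberately hides goroutine IDs, so curGoroutineID recovers one by formatting the current stack, whose first line is always `goroutine N [state]:`, and parsing N back out. A runnable distillation of that trick using only the standard library (a standalone sketch, not part of the vendored package):

```go
package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// goID mirrors curGoroutineID above: runtime.Stack's first line is
// "goroutine N [running]:", so slice out N and parse it.
func goID() uint64 {
	b := make([]byte, 64)
	b = b[:runtime.Stack(b, false)]
	b = bytes.TrimPrefix(b, []byte("goroutine "))
	b = b[:bytes.IndexByte(b, ' ')]
	n, _ := strconv.ParseUint(string(b), 10, 64)
	return n
}

func main() {
	fmt.Println("main goroutine:", goID()) // typically 1
	id := make(chan uint64)
	go func() { id <- goID() }()
	fmt.Println("other goroutine:", <-id) // a different ID
}
```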
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go
new file mode 100644
index 000000000..3ff4957e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+)
+
+func TestGoroutineLock(t *testing.T) {
+	DebugGoroutines = true
+	g := newGoroutineLock()
+	g.check()
+
+	sawPanic := make(chan interface{})
+	go func() {
+		defer func() { sawPanic <- recover() }()
+		g.check() // should panic
+	}()
+	e := <-sawPanic
+	if e == nil {
+		t.Fatal("did not see panic from check in other goroutine")
+	}
+	if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
+		t.Errorf("expected to see panic about running on the wrong goroutine; got %v", e)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore
new file mode 100644
index 000000000..0de86ddbc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore
@@ -0,0 +1,5 @@
+h2demo
+h2demo.linux
+client-id.dat
+client-secret.dat
+token.dat
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile
new file mode 100644
index 000000000..3a77cf073
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile
@@ -0,0 +1,5 @@
+h2demo.linux: h2demo.go
+	GOOS=linux go build --tags=h2demo -o h2demo.linux .
+
+upload: h2demo.linux
+	cat h2demo.linux | go run launch.go --write_object=http2-demo-server-tls/h2demo --write_object_is_public
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README
new file mode 100644
index 000000000..212a96f38
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README
@@ -0,0 +1,16 @@
+
+Client:
+ -- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true
+ -- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom)
+
+Make CA:
+$ openssl genrsa -out rootCA.key 2048
+$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem
+... install that to Firefox
+
+Make cert:
+$ openssl genrsa -out server.key 2048
+$ openssl req -new -key server.key -out server.csr
+$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500
+
+
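The server.crt/server.key pair from that README is what the demo binary below feeds to ListenAndServeTLS. For orientation, a minimal sketch of that wiring, mirroring what h2demo's main does further down (the port and nil handler here are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"github.com/bradfitz/http2"
)

func main() {
	srv := &http.Server{Addr: ":4430"} // nil Handler means http.DefaultServeMux
	// ConfigureServer registers the HTTP/2 protocol names on the
	// server's TLS config so clients can negotiate h2 via ALPN/NPN.
	http2.ConfigureServer(srv, &http2.Server{})
	// Serve with the self-signed pair generated per the README above.
	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}
```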
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go
new file mode 100644
index 000000000..de6caaa74
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go
@@ -0,0 +1,426 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// +build h2demo
+
+package main
+
+import (
+	"bytes"
+	"crypto/tls"
+	"flag"
+	"fmt"
+	"hash/crc32"
+	"image"
+	"image/jpeg"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"os/exec"
+	"path"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"camlistore.org/pkg/googlestorage"
+	"camlistore.org/pkg/singleflight"
+	"github.com/bradfitz/http2"
+)
+
+var (
+	openFirefox = flag.Bool("openff", false, "Open Firefox")
+	addr        = flag.String("addr", "localhost:4430", "TLS address to listen on")
+	httpAddr    = flag.String("httpaddr", "", "If non-empty, address to listen for regular HTTP on")
+	prod        = flag.Bool("prod", false, "Whether to configure itself to be the production http2.golang.org server.")
+)
+
+func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
+	io.WriteString(w, `<html>
+<body>
+<h1>Go + HTTP/2</h1>
+<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a
+href="https://github.com/bradfitz/http2">HTTP/2 demo & interop server</a>.</p>
+<p>Unfortunately, you're <b>not</b> using HTTP/2 right now. To do so:</p>
+<ul>
+   <li>Use Firefox Nightly or go to <b>about:config</b> and enable "network.http.spdy.enabled.http2draft"</li>
+   <li>Use Google Chrome Canary and/or go to <b>chrome://flags/#enable-spdy4</b> to <i>Enable SPDY/4</i> (Chrome's name for HTTP/2)</li>
+</ul>
+<p>See code & instructions for connecting at <a href="https://github.com/bradfitz/http2">https://github.com/bradfitz/http2</a>.</p>
+</body></html>`)
+}
+
+func home(w http.ResponseWriter, r *http.Request) {
+	if r.URL.Path != "/" {
+		http.NotFound(w, r)
+		return
+	}
+	io.WriteString(w, `<html>
+<body>
+<h1>Go + HTTP/2</h1>
+
+<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a
+href="https://github.com/bradfitz/http2">HTTP/2 demo & interop server</a>.</p>
+
+<p>Congratulations, <b>you're using HTTP/2 right now</b>.</p>
+
+<p>This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.</p>
+
+<p>The code is currently at <a href="https://github.com/bradfitz/http2">github.com/bradfitz/http2</a>
+but will move to the Go standard library at some point in the future
+(enabled by default, without users needing to change their code).</p>
+
+<p>Contact info: <i>bradfitz@golang.org</i>, or <a href="https://github.com/bradfitz/http2/issues">file a bug</a>.</p>
+
+<h2>Handlers for testing</h2>
+<ul>
+  <li>GET <a href="/reqinfo">/reqinfo</a> to dump the request + headers received</li>
+  <li>GET <a href="/clockstream">/clockstream</a> streams the current time every second</li>
+  <li>GET <a href="/gophertiles">/gophertiles</a> to see a page with a bunch of images</li>
+  <li>GET <a href="/file/gopher.png">/file/gopher.png</a> for a small file (does If-Modified-Since, Content-Range, etc)</li>
+  <li>GET <a href="/file/go.src.tar.gz">/file/go.src.tar.gz</a> for a larger file (~10 MB)</li>
+  <li>GET <a href="/redirect">/redirect</a> to redirect back to / (this page)</li>
+  <li>GET <a href="/goroutines">/goroutines</a> to see all active goroutines in this server</li>
+  <li>PUT something to <a href="/crc32">/crc32</a> to get a count of number of bytes and its CRC-32</li>
+</ul>
+
+</body></html>`)
+}
+
+func reqInfoHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "text/plain")
+	fmt.Fprintf(w, "Method: %s\n", r.Method)
+	fmt.Fprintf(w, "Protocol: %s\n", r.Proto)
+	fmt.Fprintf(w, "Host: %s\n", r.Host)
+	fmt.Fprintf(w, "RemoteAddr: %s\n", r.RemoteAddr)
+	fmt.Fprintf(w, "RequestURI: %q\n", r.RequestURI)
+	fmt.Fprintf(w, "URL: %#v\n", r.URL)
+	fmt.Fprintf(w, "Body.ContentLength: %d (-1 means unknown)\n", r.ContentLength)
+	fmt.Fprintf(w, "Close: %v (relevant for HTTP/1 only)\n", r.Close)
+	fmt.Fprintf(w, "TLS: %#v\n", r.TLS)
+	fmt.Fprintf(w, "\nHeaders:\n")
+	r.Header.Write(w)
+}
+
+func crcHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "PUT" {
+		http.Error(w, "PUT required.", 400)
+		return
+	}
+	crc := crc32.NewIEEE()
+	n, err := io.Copy(crc, r.Body)
+	if err == nil {
+		w.Header().Set("Content-Type", "text/plain")
+		fmt.Fprintf(w, "bytes=%d, CRC32=%x", n, crc.Sum(nil))
+	}
+}
+
+var (
+	fsGrp   singleflight.Group
+	fsMu    sync.Mutex // guards fsCache
+	fsCache = map[string]http.Handler{}
+)
+
+// fileServer returns a file-serving handler that proxies URL.
+// It lazily fetches URL on the first access and caches its contents forever.
+func fileServer(url string) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		hi, err := fsGrp.Do(url, func() (interface{}, error) {
+			fsMu.Lock()
+			if h, ok := fsCache[url]; ok {
+				fsMu.Unlock()
+				return h, nil
+			}
+			fsMu.Unlock()
+
+			res, err := http.Get(url)
+			if err != nil {
+				return nil, err
+			}
+			defer res.Body.Close()
+			slurp, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				return nil, err
+			}
+
+			modTime := time.Now()
+			var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				http.ServeContent(w, r, path.Base(url), modTime, bytes.NewReader(slurp))
+			})
+			fsMu.Lock()
+			fsCache[url] = h
+			fsMu.Unlock()
+			return h, nil
+		})
+		if err != nil {
+			http.Error(w, err.Error(), 500)
+			return
+		}
+		hi.(http.Handler).ServeHTTP(w, r)
+	})
+}
+
+func clockStreamHandler(w http.ResponseWriter, r *http.Request) {
+	clientGone := w.(http.CloseNotifier).CloseNotify()
+	w.Header().Set("Content-Type", "text/plain")
+	ticker := time.NewTicker(1 * time.Second)
+	defer ticker.Stop()
+	fmt.Fprintf(w, "# ~1KB of junk to force browsers to start rendering immediately: \n")
+	io.WriteString(w, strings.Repeat("# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 13))
+
+	for {
+		fmt.Fprintf(w, "%v\n", time.Now())
+		w.(http.Flusher).Flush()
+		select {
+		case <-ticker.C:
+		case <-clientGone:
+			log.Printf("Client %v disconnected from the clock", r.RemoteAddr)
+			return
+		}
+	}
+}
+
+func registerHandlers() {
+	tiles := newGopherTilesHandler()
+
+	mux2 := http.NewServeMux()
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		if r.TLS == nil {
+			if r.URL.Path == "/gophertiles" {
+				tiles.ServeHTTP(w, r)
+				return
+			}
+			http.Redirect(w, r, "https://http2.golang.org/", http.StatusFound)
+			return
+		}
+		if r.ProtoMajor == 1 {
+			if r.URL.Path == "/reqinfo" {
+				reqInfoHandler(w, r)
+				return
+			}
+			homeOldHTTP(w, r)
+			return
+		}
+		mux2.ServeHTTP(w, r)
+	})
+	mux2.HandleFunc("/", home)
+	mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png"))
+	mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz"))
+	mux2.HandleFunc("/reqinfo", reqInfoHandler)
+	mux2.HandleFunc("/crc32", crcHandler)
+	mux2.HandleFunc("/clockstream", clockStreamHandler)
+	mux2.Handle("/gophertiles", tiles)
+	mux2.HandleFunc("/redirect", func(w http.ResponseWriter, r *http.Request) {
+		http.Redirect(w, r, "/", http.StatusFound)
+	})
+	stripHomedir := regexp.MustCompile(`/(Users|home)/\w+`)
+	mux2.HandleFunc("/goroutines", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		buf := make([]byte, 2<<20)
+		w.Write(stripHomedir.ReplaceAll(buf[:runtime.Stack(buf, true)], nil))
+	})
+}
+
+func newGopherTilesHandler() http.Handler {
+	const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg"
+	res, err := http.Get(gopherURL)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if res.StatusCode != 200 {
+		log.Fatalf("Error fetching %s: %v", gopherURL, res.Status)
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	res.Body.Close()
+	if err != nil {
+		log.Fatal(err)
+	}
+	im, err := jpeg.Decode(bytes.NewReader(slurp))
+	if err != nil {
+		if len(slurp) > 1024 {
+			slurp = slurp[:1024]
+		}
+		log.Fatalf("Failed to decode gopher image: %v (got %q)", err, slurp)
+	}
+
+	type subImager interface {
+		SubImage(image.Rectangle) image.Image
+	}
+	const tileSize = 32
+	xt := im.Bounds().Max.X / tileSize
+	yt := im.Bounds().Max.Y / tileSize
+	var tile [][][]byte // y -> x -> jpeg bytes
+	for yi := 0; yi < yt; yi++ {
+		var row [][]byte
+		for xi := 0; xi < xt; xi++ {
+			si := im.(subImager).SubImage(image.Rectangle{
+				Min: image.Point{xi * tileSize, yi * tileSize},
+				Max: image.Point{(xi + 1) * tileSize, (yi + 1) * tileSize},
+			})
+			buf := new(bytes.Buffer)
+			if err := jpeg.Encode(buf, si, &jpeg.Options{Quality: 90}); err != nil {
+				log.Fatal(err)
+			}
+			row = append(row, buf.Bytes())
+		}
+		tile = append(tile, row)
+	}
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ms, _ := strconv.Atoi(r.FormValue("latency"))
+		const nanosPerMilli = 1e6
+		if r.FormValue("x") != "" {
+			x, _ := strconv.Atoi(r.FormValue("x"))
+			y, _ := strconv.Atoi(r.FormValue("y"))
+			if ms <= 1000 {
+				time.Sleep(time.Duration(ms) * nanosPerMilli)
+			}
+			if x >= 0 && x < xt && y >= 0 && y < yt {
+				http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(tile[y][x]))
+				return
+			}
+		}
+		io.WriteString(w, "<html><body>")
+		fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:<p>", xt*yt)
+		for _, ms := range []int{0, 30, 200, 1000} {
+			d := time.Duration(ms) * nanosPerMilli
+			fmt.Fprintf(w, "[<a href='https://%s/gophertiles?latency=%d'>HTTP/2, %v latency</a>] [<a href='http://%s/gophertiles?latency=%d'>HTTP/1, %v latency</a>]<br>\n",
+				httpsHost(), ms, d,
+				httpHost(), ms, d,
+			)
+		}
+		io.WriteString(w, "<p>\n")
+		cacheBust := time.Now().UnixNano()
+		for y := 0; y < yt; y++ {
+			for x := 0; x < xt; x++ {
+				fmt.Fprintf(w, "<img width=%d height=%d src='/gophertiles?x=%d&y=%d&cachebust=%d&latency=%d'>",
+					tileSize, tileSize, x, y, cacheBust, ms)
+			}
+			io.WriteString(w, "<br/>\n")
+		}
+		io.WriteString(w, "<hr><br><a href='/'>&lt;&lt Back to Go HTTP/2 demo server</a>")
+	})
+}
+
+func httpsHost() string {
+	if *prod {
+		return "http2.golang.org"
+	}
+	if v := *addr; strings.HasPrefix(v, ":") {
+		return "localhost" + v
+	} else {
+		return v
+	}
+}
+
+func httpHost() string {
+	if *prod {
+		return "http2.golang.org"
+	}
+	if v := *httpAddr; strings.HasPrefix(v, ":") {
+		return "localhost" + v
+	} else {
+		return v
+	}
+}
+
+func serveProdTLS() error {
+	c, err := googlestorage.NewServiceClient()
+	if err != nil {
+		return err
+	}
+	slurp := func(key string) ([]byte, error) {
+		const bucket = "http2-demo-server-tls"
+		rc, _, err := c.GetObject(&googlestorage.Object{
+			Bucket: bucket,
+			Key:    key,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err)
+		}
+		defer rc.Close()
+		return ioutil.ReadAll(rc)
+	}
+	certPem, err := slurp("http2.golang.org.chained.pem")
+	if err != nil {
+		return err
+	}
+	keyPem, err := slurp("http2.golang.org.key")
+	if err != nil {
+		return err
+	}
+	cert, err := tls.X509KeyPair(certPem, keyPem)
+	if err != nil {
+		return err
+	}
+	srv := &http.Server{
+		TLSConfig: &tls.Config{
+			Certificates: []tls.Certificate{cert},
+		},
+	}
+	http2.ConfigureServer(srv, &http2.Server{})
+	ln, err := net.Listen("tcp", ":443")
+	if err != nil {
+		return err
+	}
+	return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig))
+}
+
+type tcpKeepAliveListener struct {
+	*net.TCPListener
+}
+
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+	tc, err := ln.AcceptTCP()
+	if err != nil {
+		return
+	}
+	tc.SetKeepAlive(true)
+	tc.SetKeepAlivePeriod(3 * time.Minute)
+	return tc, nil
+}
+
+func serveProd() error {
+	errc := make(chan error, 2)
+	go func() { errc <- http.ListenAndServe(":80", nil) }()
+	go func() { errc <- serveProdTLS() }()
+	return <-errc
+}
+
+func main() {
+	var srv http.Server
+	flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.")
+	flag.Parse()
+	srv.Addr = *addr
+
+	registerHandlers()
+
+	if *prod {
+		*httpAddr = "http2.golang.org"
+		log.Fatal(serveProd())
+	}
+
+	url := "https://" + *addr + "/"
+	log.Printf("Listening on " + url)
+	http2.ConfigureServer(&srv, &http2.Server{})
+
+	if *httpAddr != "" {
+		go func() { log.Fatal(http.ListenAndServe(*httpAddr, nil)) }()
+	}
+
+	go func() {
+		log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
+	}()
+	if *openFirefox && runtime.GOOS == "darwin" {
+		time.Sleep(250 * time.Millisecond)
+		exec.Command("open", "-b", "org.mozilla.nightly", "https://localhost:4430/").Run()
+	}
+	select {}
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go
new file mode 100644
index 000000000..746661543
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go
@@ -0,0 +1,302 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + proj = flag.String("project", "symbolic-datum-552", "name of Project") + zone = flag.String("zone", "us-central1-a", "GCE zone") + mach = flag.String("machinetype", "n1-standard-1", "Machine type") + instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") + sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") + staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") + + writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") + publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") +) + +func readFile(v string) string { + slurp, err := ioutil.ReadFile(v) + if err != nil { + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} + +var config = &oauth2.Config{ + // The client-id and secret should be for an "Installed Application" when using + // the CLI. Later we'll use a web application with a callback. + ClientID: readFile("client-id.dat"), + ClientSecret: readFile("client-secret.dat"), + Endpoint: google.Endpoint, + Scopes: []string{ + compute.DevstorageFull_controlScope, + compute.ComputeScope, + "https://www.googleapis.com/auth/sqlservice", + "https://www.googleapis.com/auth/sqlservice.admin", + }, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", +} + +const baseConfig = `#cloud-config +coreos: + units: + - name: h2demo.service + command: start + content: | + [Unit] + Description=HTTP2 Demo + + [Service] + ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' + ExecStart=/opt/bin/h2demo --prod + RestartSec=5s + Restart=always + Type=simple + + [Install] + WantedBy=multi-user.target +` + +func main() { + flag.Parse() + if *proj == "" { + log.Fatalf("Missing --project flag") + } + prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj + machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach + + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() + if err != nil { + if *writeObject != "" { + log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") + } + log.Printf("Error getting token from %s: %v", tokenFileName, err) + log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) + fmt.Print("\nEnter auth code: ") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + authCode := strings.TrimSpace(sc.Text()) + token, err = config.Exchange(oauth2.NoContext, authCode) + if err != nil { + log.Fatalf("Error exchanging auth code for a token: %v", err) + } + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) + } + + oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) + + if *writeObject != "" { + writeCloudStorageObject(oauthClient) + return + } + + 
computeService, _ := compute.New(oauthClient) + + natIP := *staticIP + if natIP == "" { + // Try to find it by name. + aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() + if err != nil { + log.Fatal(err) + } + // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList + IPLoop: + for _, asl := range aggAddrList.Items { + for _, addr := range asl.Addresses { + if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { + natIP = addr.Address + break IPLoop + } + } + } + } + + cloudConfig := baseConfig + if *sshPub != "" { + key := strings.TrimSpace(readFile(*sshPub)) + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) + } + if os.Getenv("USER") == "bradfitz" { + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") + } + const maxCloudConfig = 32 << 10 // per compute API docs + if len(cloudConfig) > maxCloudConfig { + log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) + } + + instance := &compute.Instance{ + Name: *instName, + Description: "Go Builder", + MachineType: machType, + Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, + Tags: &compute.Tags{ + Items: []string{"http-server", "https-server"}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: cloudConfig, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + &compute.NetworkInterface{ + AccessConfigs: []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + NatIP: natIP, + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{ + compute.DevstorageFull_controlScope, + compute.ComputeScope, + }, + }, + }, + } + + log.Printf("Creating instance...") + op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() + if err != nil { + log.Fatalf("Failed to create instance: %v", err) + } + opName := op.Name + log.Printf("Created. Waiting on operation %v", opName) +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() + if err != nil { + log.Fatalf("Failed to get op %s: %v", opName, err) + } + switch op.Status { + case "PENDING", "RUNNING": + log.Printf("Waiting on operation %v", opName) + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + log.Printf("Error: %+v", operr) + } + log.Fatalf("Failed to start.") + } + log.Printf("Success. 
%+v", op) + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + + inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() + if err != nil { + log.Fatalf("Error getting instance after creation: %v", err) + } + ij, _ := json.MarshalIndent(inst, "", " ") + log.Printf("Instance: %s", ij) +} + +func instanceDisk(svc *compute.Service) *compute.AttachedDisk { + const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" + diskName := *instName + "-disk" + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: diskName, + SourceImage: imageURL, + DiskSizeGb: 50, + }, + } +} + +func writeCloudStorageObject(httpClient *http.Client) { + content := os.Stdin + const maxSlurp = 1 << 20 + var buf bytes.Buffer + n, err := io.CopyN(&buf, content, maxSlurp) + if err != nil && err != io.EOF { + log.Fatalf("Error reading from stdin: %v, %v", n, err) + } + contentType := http.DetectContentType(buf.Bytes()) + + req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) + if err != nil { + log.Fatal(err) + } + req.Header.Set("x-goog-api-version", "2") + if *publicObject { + req.Header.Set("x-goog-acl", "public-read") + } + req.Header.Set("Content-Type", contentType) + res, err := httpClient.Do(req) + if err != nil { + log.Fatal(err) + } + if res.StatusCode != 200 { + res.Write(os.Stderr) + log.Fatalf("Failed.") + } + log.Printf("Success.") + os.Exit(0) +} + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.key b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.key new file mode 100644 index 000000000..a15a6abaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q +62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby +XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV +mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ +JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ +SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA +nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e +/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx +qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser +hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j +NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E +LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 +8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c +0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws +K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd +bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo +QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt 
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 +nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy +b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 +gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev +WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr +C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj +x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA +hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.pem b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.pem new file mode 100644 index 000000000..3a323e774 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG +A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 +DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 +NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG +cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS +R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT +ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk +JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 +mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW +caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G +A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt +hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB +MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES +MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv +bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h +U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao +eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 +UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD +58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n +sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF +kPe6XoSbiLm/kxk32T0= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.srl b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.srl new file mode 100644 index 000000000..6db389188 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.srl @@ -0,0 +1 @@ +E2CE26BF3285059C diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.crt b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.crt new file mode 100644 index 000000000..c59059bd6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT +C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW +DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow +RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE +ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l 
+gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 +dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL +A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws +/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 +F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB +AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R +rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD +EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 +KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI +dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU +90p6/CbU71bGbfpM2PHot2fm +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.key b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.key new file mode 100644 index 000000000..f329c1421 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi +fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm +J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef +b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 +mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ +fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p +3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 +qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 +NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 +LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN +a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ +Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL +W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO +gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm +S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS +Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp +V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 +KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 +yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 +drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e +ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R +48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 +c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY +nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl +IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2i/README.md b/Godeps/_workspace/src/github.com/bradfitz/http2/h2i/README.md new file mode 100644 index 000000000..3ceecb7d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2i/README.md @@ -0,0 +1,97 @@ +# h2i + +**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' +days of telnetting to your HTTP/1.n servers? We're bringing you +back. 
+
+Features:
+- send raw HTTP/2 frames
+  - PING
+  - SETTINGS
+  - HEADERS
+  - etc
+- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
+- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
+- tab completion of commands, options
+
+Not yet features, but soon:
+- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
+- request bodies (DATA frames)
+- send invalid frames for testing server implementations (supported by underlying Framer)
+
+Later:
+- act like a server
+
+## Installation
+
+```
+$ go get github.com/bradfitz/http2/h2i
+$ h2i <host>
+```
+
+## Demo
+
+```
+$ h2i
+Usage: h2i <hostname>
+
+  -insecure
+        Whether to skip TLS cert validation
+  -nextproto string
+        Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")
+
+$ h2i google.com
+Connecting to google.com:443 ...
+Connected to 74.125.224.41:443
+Negotiated protocol "h2-14"
+[FrameHeader SETTINGS len=18]
+  [MAX_CONCURRENT_STREAMS = 100]
+  [INITIAL_WINDOW_SIZE = 1048576]
+  [MAX_FRAME_SIZE = 16384]
+[FrameHeader WINDOW_UPDATE len=4]
+  Window-Increment = 983041
+
+h2i> PING h2iSayHI
+[FrameHeader PING flags=ACK len=8]
+  Data = "h2iSayHI"
+h2i> headers
+(as HTTP/1.1)> GET / HTTP/1.1
+(as HTTP/1.1)> Host: ip.appspot.com
+(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
+(as HTTP/1.1)>
+Opening Stream-ID 1:
+ :authority = ip.appspot.com
+ :method = GET
+ :path = /
+ :scheme = https
+ user-agent = h2i/brad-n-blake
+[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
+  :status = "200"
+  alternate-protocol = "443:quic,p=1"
+  content-length = "15"
+  content-type = "text/html"
+  date = "Fri, 01 May 2015 23:06:56 GMT"
+  server = "Google Frontend"
+[FrameHeader DATA flags=END_STREAM stream=1 len=15]
+  "173.164.155.78\n"
+[FrameHeader PING len=8]
+  Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
+h2i> ping
+[FrameHeader PING flags=ACK len=8]
+  Data = "h2i_ping"
+h2i> ping
+[FrameHeader PING flags=ACK len=8]
+  Data = "h2i_ping"
+h2i> ping
+[FrameHeader GOAWAY len=22]
+  Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)
+
+ReadFrame: EOF
+```
+
+## Status
+
+Quick few hour hack. So much yet to do. Feel free to file issues for
+bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
+and I aren't yet accepting pull requests until things settle down.
+
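For reference, the connection setup the demo transcript implies is small: a TLS dial offering the h2 protocol names, the fixed client preface, then a Framer speaking raw frames over the connection. A minimal sketch against the vendored package (error handling trimmed to log.Fatal; the server usually answers with its SETTINGS frame before acking the PING):

```go
package main

import (
	"crypto/tls"
	"io"
	"log"

	"github.com/bradfitz/http2"
)

func main() {
	// Offer the same protocol names as h2i's -nextproto default.
	tc, err := tls.Dial("tcp", "google.com:443", &tls.Config{
		ServerName: "google.com",
		NextProtos: []string{"h2", "h2-14"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tc.Close()

	// Every HTTP/2 client connection begins with a fixed preface.
	if _, err := io.WriteString(tc, http2.ClientPreface); err != nil {
		log.Fatal(err)
	}

	fr := http2.NewFramer(tc, tc) // write to tc, read from tc
	if err := fr.WritePing(false, [8]byte{'h', 'i'}); err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 2; i++ { // expect SETTINGS, then the PING ack
		f, err := fr.ReadFrame()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("got %v", f)
	}
}
```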
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2i/h2i.go b/Godeps/_workspace/src/github.com/bradfitz/http2/h2i/h2i.go
new file mode 100644
index 000000000..67b874d6b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/h2i/h2i.go
@@ -0,0 +1,489 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+/*
+The h2i command is an interactive HTTP/2 console.
+
+Usage:
+  $ h2i [flags] <hostname>
+
+Interactive commands in the console: (all parts case-insensitive)
+
+  ping [data]
+  settings ack
+  settings FOO=n BAR=z
+  headers (open a new stream by typing HTTP/1.1)
+*/
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/bradfitz/http2"
+	"github.com/bradfitz/http2/hpack"
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+// Flags
+var (
+	flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.")
+	flagInsecure  = flag.Bool("insecure", false, "Whether to skip TLS cert validation")
+)
+
+type command struct {
+	run func(*h2i, []string) error // required
+
+	// complete optionally specifies tokens (case-insensitive) which are
+	// valid for this subcommand.
+	complete func() []string
+}
+
+var commands = map[string]command{
+	"ping": command{run: (*h2i).cmdPing},
+	"settings": command{
+		run: (*h2i).cmdSettings,
+		complete: func() []string {
+			return []string{
+				"ACK",
+				http2.SettingHeaderTableSize.String(),
+				http2.SettingEnablePush.String(),
+				http2.SettingMaxConcurrentStreams.String(),
+				http2.SettingInitialWindowSize.String(),
+				http2.SettingMaxFrameSize.String(),
+				http2.SettingMaxHeaderListSize.String(),
+			}
+		},
+	},
+	"quit":    command{run: (*h2i).cmdQuit},
+	"headers": command{run: (*h2i).cmdHeaders},
+}
+
+func usage() {
+	fmt.Fprintf(os.Stderr, "Usage: h2i <hostname>\n\n")
+	flag.PrintDefaults()
+	os.Exit(1)
+}
+
+// withPort adds ":443" if another port isn't already present.
+func withPort(host string) string {
+	if _, _, err := net.SplitHostPort(host); err != nil {
+		return net.JoinHostPort(host, "443")
+	}
+	return host
+}
+
+// h2i is the app's state.
+type h2i struct { + host string + tc *tls.Conn + framer *http2.Framer + term *terminal.Terminal + + // owned by the command loop: + streamID uint32 + hbuf bytes.Buffer + henc *hpack.Encoder + + // owned by the readFrames loop: + peerSetting map[http2.SettingID]uint32 + hdec *hpack.Decoder +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + } + log.SetFlags(0) + + host := flag.Arg(0) + app := &h2i{ + host: host, + peerSetting: make(map[http2.SettingID]uint32), + } + app.henc = hpack.NewEncoder(&app.hbuf) + + if err := app.Main(); err != nil { + if app.term != nil { + app.logf("%v\n", err) + } else { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "\n") +} + +func (app *h2i) Main() error { + cfg := &tls.Config{ + ServerName: app.host, + NextProtos: strings.Split(*flagNextProto, ","), + InsecureSkipVerify: *flagInsecure, + } + + hostAndPort := withPort(app.host) + log.Printf("Connecting to %s ...", hostAndPort) + tc, err := tls.Dial("tcp", hostAndPort, cfg) + if err != nil { + return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err) + } + log.Printf("Connected to %v", tc.RemoteAddr()) + defer tc.Close() + + if err := tc.Handshake(); err != nil { + return fmt.Errorf("TLS handshake: %v", err) + } + if !*flagInsecure { + if err := tc.VerifyHostname(app.host); err != nil { + return fmt.Errorf("VerifyHostname: %v", err) + } + } + state := tc.ConnectionState() + log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) + if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { + return fmt.Errorf("Could not negotiate protocol mutually") + } + + if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { + return err + } + + app.framer = http2.NewFramer(tc, tc) + + oldState, err := terminal.MakeRaw(0) + if err != nil { + return err + } + defer terminal.Restore(0, oldState) + + var screen = struct { + io.Reader + io.Writer + }{os.Stdin, os.Stdout} + + app.term = terminal.NewTerminal(screen, "h2i> ") + lastWord := regexp.MustCompile(`.+\W(\w+)$`) + app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { + if key != '\t' { + return + } + if pos != len(line) { + // TODO: we're being lazy for now, only supporting tab completion at the end. + return + } + // Auto-complete for the command itself. 
+ if !strings.Contains(line, " ") { + var name string + name, _, ok = lookupCommand(line) + if !ok { + return + } + return name, len(name), true + } + _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) + if !ok || c.complete == nil { + return + } + if strings.HasSuffix(line, " ") { + app.logf("%s", strings.Join(c.complete(), " ")) + return line, pos, true + } + m := lastWord.FindStringSubmatch(line) + if m == nil { + return line, len(line), true + } + soFar := m[1] + var match []string + for _, cand := range c.complete() { + if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { + continue + } + match = append(match, cand) + } + if len(match) == 0 { + return + } + if len(match) > 1 { + // TODO: auto-complete any common prefix + app.logf("%s", strings.Join(match, " ")) + return line, pos, true + } + newLine = line[:len(line)-len(soFar)] + match[0] + return newLine, len(newLine), true + + } + + errc := make(chan error, 2) + go func() { errc <- app.readFrames() }() + go func() { errc <- app.readConsole() }() + return <-errc +} + +func (app *h2i) logf(format string, args ...interface{}) { + fmt.Fprintf(app.term, format+"\n", args...) +} + +func (app *h2i) readConsole() error { + for { + line, err := app.term.ReadLine() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("terminal.ReadLine: %v", err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + cmd, args := f[0], f[1:] + if _, c, ok := lookupCommand(cmd); ok { + err = c.run(app, args) + } else { + app.logf("Unknown command %q", line) + } + if err == errExitApp { + return nil + } + if err != nil { + return err + } + } +} + +func lookupCommand(prefix string) (name string, c command, ok bool) { + prefix = strings.ToLower(prefix) + if c, ok = commands[prefix]; ok { + return prefix, c, ok + } + + for full, candidate := range commands { + if strings.HasPrefix(full, prefix) { + if c.run != nil { + return "", command{}, false // ambiguous + } + c = candidate + name = full + } + } + return name, c, c.run != nil +} + +var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") + +func (a *h2i) cmdQuit(args []string) error { + if len(args) > 0 { + a.logf("the QUIT command takes no argument") + return nil + } + return errExitApp +} + +func (a *h2i) cmdSettings(args []string) error { + if len(args) == 1 && strings.EqualFold(args[0], "ACK") { + return a.framer.WriteSettingsAck() + } + var settings []http2.Setting + for _, arg := range args { + if strings.EqualFold(arg, "ACK") { + a.logf("Error: ACK must be only argument with the SETTINGS command") + return nil + } + eq := strings.Index(arg, "=") + if eq == -1 { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + sid, ok := settingByName(arg[:eq]) + if !ok { + a.logf("Error: unknown setting name %q", arg[:eq]) + return nil + } + val, err := strconv.ParseUint(arg[eq+1:], 10, 32) + if err != nil { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + settings = append(settings, http2.Setting{ + ID: sid, + Val: uint32(val), + }) + } + a.logf("Sending: %v", settings) + return a.framer.WriteSettings(settings...) 
+} + +func settingByName(name string) (http2.SettingID, bool) { + for _, sid := range [...]http2.SettingID{ + http2.SettingHeaderTableSize, + http2.SettingEnablePush, + http2.SettingMaxConcurrentStreams, + http2.SettingInitialWindowSize, + http2.SettingMaxFrameSize, + http2.SettingMaxHeaderListSize, + } { + if strings.EqualFold(sid.String(), name) { + return sid, true + } + } + return 0, false +} + +func (app *h2i) cmdPing(args []string) error { + if len(args) > 1 { + app.logf("invalid PING usage: only accepts 0 or 1 args") + return nil // nil means don't end the program + } + var data [8]byte + if len(args) == 1 { + copy(data[:], args[0]) + } else { + copy(data[:], "h2i_ping") + } + return app.framer.WritePing(false, data) +} + +func (app *h2i) cmdHeaders(args []string) error { + if len(args) > 0 { + app.logf("Error: HEADERS doesn't yet take arguments.") + // TODO: flags for restricting window size, to force CONTINUATION + // frames. + return nil + } + var h1req bytes.Buffer + app.term.SetPrompt("(as HTTP/1.1)> ") + defer app.term.SetPrompt("h2i> ") + for { + line, err := app.term.ReadLine() + if err != nil { + return err + } + h1req.WriteString(line) + h1req.WriteString("\r\n") + if line == "" { + break + } + } + req, err := http.ReadRequest(bufio.NewReader(&h1req)) + if err != nil { + app.logf("Invalid HTTP/1.1 request: %v", err) + return nil + } + if app.streamID == 0 { + app.streamID = 1 + } else { + app.streamID += 2 + } + app.logf("Opening Stream-ID %d:", app.streamID) + hbf := app.encodeHeaders(req) + if len(hbf) > 16<<10 { + app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") + return nil + } + return app.framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: app.streamID, + BlockFragment: hbf, + EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now + EndHeaders: true, // for now + }) +} + +func (app *h2i) readFrames() error { + for { + f, err := app.framer.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame: %v", err) + } + app.logf("%v", f) + switch f := f.(type) { + case *http2.PingFrame: + app.logf(" Data = %q", f.Data) + case *http2.SettingsFrame: + f.ForeachSetting(func(s http2.Setting) error { + app.logf(" %v", s) + app.peerSetting[s.ID] = s.Val + return nil + }) + case *http2.WindowUpdateFrame: + app.logf(" Window-Increment = %v\n", f.Increment) + case *http2.GoAwayFrame: + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode) + case *http2.DataFrame: + app.logf(" %q", f.Data()) + case *http2.HeadersFrame: + if f.HasPriority() { + app.logf(" PRIORITY = %v", f.Priority) + } + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. 
But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + } + } +} + +// called from readLoop +func (app *h2i) onNewHeaderField(f hpack.HeaderField) { + if f.Sensitive { + app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) + } + app.logf(" %s = %q", f.Name, f.Value) +} + +func (app *h2i) encodeHeaders(req *http.Request) []byte { + app.hbuf.Reset() + + // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go + host := req.Host + if host == "" { + host = req.URL.Host + } + + path := req.URL.Path + if path == "" { + path = "/" + } + + app.writeHeader(":authority", host) // probably not right for all sites + app.writeHeader(":method", req.Method) + app.writeHeader(":path", path) + app.writeHeader(":scheme", "https") + + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + if lowKey == "host" { + continue + } + for _, v := range vv { + app.writeHeader(lowKey, v) + } + } + return app.hbuf.Bytes() +} + +func (app *h2i) writeHeader(name, value string) { + app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) + app.logf(" %s = %s", name, value) +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/headermap.go b/Godeps/_workspace/src/github.com/bradfitz/http2/headermap.go new file mode 100644 index 000000000..67c7c4835 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/headermap.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode.go new file mode 100644 index 000000000..19bd9f4fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode.go @@ -0,0 +1,252 @@ +// Copyright 2014 The Go Authors. 
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+	"io"
+)
+
+const (
+	uint32Max              = ^uint32(0)
+	initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+	dynTab dynamicTable
+	// minSize is the minimum table size set by
+	// SetMaxDynamicTableSize after the previous Header Table Size
+	// Update.
+	minSize uint32
+	// maxSizeLimit is the maximum table size this encoder
+	// supports. This will protect the encoder from too large
+	// size.
+	maxSizeLimit uint32
+	// tableSizeUpdate indicates whether "Header Table Size
+	// Update" is required.
+	tableSizeUpdate bool
+	w               io.Writer
+	buf             []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding. An
+// encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+	e := &Encoder{
+		minSize:         uint32Max,
+		maxSizeLimit:    initialHeaderTableSize,
+		tableSizeUpdate: false,
+		w:               w,
+	}
+	e.dynTab.setMaxSize(initialHeaderTableSize)
+	return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for "Header Table Size Update"
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+	e.buf = e.buf[:0]
+
+	if e.tableSizeUpdate {
+		e.tableSizeUpdate = false
+		if e.minSize < e.dynTab.maxSize {
+			e.buf = appendTableSize(e.buf, e.minSize)
+		}
+		e.minSize = uint32Max
+		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+	}
+
+	idx, nameValueMatch := e.searchTable(f)
+	if nameValueMatch {
+		e.buf = appendIndexed(e.buf, idx)
+	} else {
+		indexing := e.shouldIndex(f)
+		if indexing {
+			e.dynTab.add(f)
+		}
+
+		if idx == 0 {
+			e.buf = appendNewName(e.buf, f, indexing)
+		} else {
+			e.buf = appendIndexedName(e.buf, f, idx, indexing)
+		}
+	}
+	n, err := e.w.Write(e.buf)
+	if err == nil && n != len(e.buf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+// searchTable searches f in both static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+	for idx, hf := range staticTable {
+		if !constantTimeStringCompare(hf.Name, f.Name) {
+			continue
+		}
+		if i == 0 {
+			i = uint64(idx + 1)
+		}
+		if f.Sensitive {
+			continue
+		}
+		if !constantTimeStringCompare(hf.Value, f.Value) {
+			continue
+		}
+		i = uint64(idx + 1)
+		nameValueMatch = true
+		return
+	}
+
+	j, nameValueMatch := e.dynTab.search(f)
+	if nameValueMatch || (i == 0 && j != 0) {
+		i = j + uint64(len(staticTable))
+	}
+	return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same size of the default dynamic header table
+// size described in HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
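A worked example of the n-bit prefix integers appendVarInt produces may help: a value smaller than 2^n - 1 fits entirely in the prefix; anything larger saturates the prefix and continues in 7-bit groups, low bits first. The sketch below is a standalone copy of the same logic (not part of the package) reproducing the value RFC 7541 Appendix C.1.2 uses: 1337 with a 5-bit prefix.

```go
package main

import "fmt"

// encodePrefixedInt is a standalone copy of appendVarInt's logic.
func encodePrefixedInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1) // largest value the n-bit prefix can hold
	if i < k {
		return append(dst, byte(i)) // fits in the prefix byte
	}
	dst = append(dst, byte(k)) // saturate the prefix
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f))) // continuation bit set
	}
	return append(dst, byte(i))
}

func main() {
	// 1337 - 31 = 1306 = 10*128 + 26, so: 0x1f, then 0x80|26 = 0x9a, then 0x0a.
	fmt.Printf("% x\n", encodePrefixedInt(nil, 5, 1337)) // prints: 1f 9a 0a
}
```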
+// By default, it is set to 4096, which is the default dynamic header
+// table size described in the HPACK specification. If the current
+// maximum dynamic header table size is strictly greater than v,
+// "Header Table Size Update" will be done in the next WriteField call
+// and the maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring to the indexed
+// name entry, as encoded in one of "Literal Header field - Indexed
+// Name" representation variants, to dst and returns the extended
+// buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when it produces a strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+	huffmanLength := HuffmanEncodeLength(s)
+	if huffmanLength < uint64(len(s)) {
+		first := len(dst)
+		dst = appendVarInt(dst, 7, huffmanLength)
+		dst = AppendHuffmanString(dst, s)
+		dst[first] |= 0x80
+	} else {
+		dst = appendVarInt(dst, 7, uint64(len(s)))
+		dst = append(dst, s...)
+ } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. +func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go new file mode 100644 index 000000000..dce66c90f --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go @@ -0,0 +1,331 @@ +// Copyright 2014 The Go Authors. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package hpack + +import ( + "bytes" + "encoding/hex" + "reflect" + "strings" + "testing" +) + +func TestEncoderTableSizeUpdate(t *testing.T) { + tests := []struct { + size1, size2 uint32 + wantHex string + }{ + // Should emit 2 table size updates (2048 and 4096) + {2048, 4096, "3fe10f 3fe11f 82"}, + + // Should emit 1 table size update (2048) + {16384, 2048, "3fe10f 82"}, + } + for _, tt := range tests { + var buf bytes.Buffer + e := NewEncoder(&buf) + e.SetMaxDynamicTableSize(tt.size1) + e.SetMaxDynamicTableSize(tt.size2) + if err := e.WriteField(pair(":method", "GET")); err != nil { + t.Fatal(err) + } + want := removeSpace(tt.wantHex) + if got := hex.EncodeToString(buf.Bytes()); got != want { + t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) + } + } +} + +func TestEncoderWriteField(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + var got []HeaderField + d := NewDecoder(4<<10, func(f HeaderField) { + got = append(got, f) + }) + + tests := []struct { + hdrs []HeaderField + }{ + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }}, + } + for i, tt := range tests { + buf.Reset() + got = got[:0] + for _, hf := range tt.hdrs { + if err := e.WriteField(hf); err != nil { + t.Fatal(err) + } + } + _, err := d.Write(buf.Bytes()) + if err != nil { + t.Errorf("%d. Decoder Write = %v", i, err) + } + if !reflect.DeepEqual(got, tt.hdrs) { + t.Errorf("%d. 
Decoded %+v; want %+v", i, got, tt.hdrs) + } + } +} + +func TestEncoderSearchTable(t *testing.T) { + e := NewEncoder(nil) + + e.dynTab.add(pair("foo", "bar")) + e.dynTab.add(pair("blake", "miz")) + e.dynTab.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), uint64(len(staticTable) + 3), true}, + {pair("blake", "miz"), uint64(len(staticTable) + 2), true}, + {pair(":method", "GET"), 2, true}, + + // Only name match because Sensitive == true + {HeaderField{":method", "GET", true}, 2, false}, + + // Only Name matches + {pair("foo", "..."), uint64(len(staticTable) + 3), false}, + {pair("blake", "..."), uint64(len(staticTable) + 2), false}, + {pair(":method", "..."), 2, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestAppendVarInt(t *testing.T) { + tests := []struct { + n byte + i uint64 + want []byte + }{ + // Fits in a byte: + {1, 0, []byte{0}}, + {2, 2, []byte{2}}, + {3, 6, []byte{6}}, + {4, 14, []byte{14}}, + {5, 30, []byte{30}}, + {6, 62, []byte{62}}, + {7, 126, []byte{126}}, + {8, 254, []byte{254}}, + + // Multiple bytes: + {5, 1337, []byte{31, 154, 10}}, + } + for _, tt := range tests { + got := appendVarInt(nil, tt.n, tt.i) + if !bytes.Equal(got, tt.want) { + t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) + } + } +} + +func TestAppendHpackString(t *testing.T) { + tests := []struct { + s, wantHex string + }{ + // Huffman encoded + {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + + // Not Huffman encoded + {"a", "01 61"}, + + // zero length + {"", "00"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendHpackString(nil, tt.s) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) + } + } +} + +func TestAppendIndexed(t *testing.T) { + tests := []struct { + i uint64 + wantHex string + }{ + // 1 byte + {1, "81"}, + {126, "fe"}, + + // 2 bytes + {127, "ff00"}, + {128, "ff01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexed(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestAppendNewName(t *testing.T) { + tests := []struct { + f HeaderField + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Without indexing + {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Never indexed + {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendNewName(nil, tt.f, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendIndexedName(t *testing.T) { + tests := []struct { + f HeaderField + i uint64 + indexing 
bool + wantHex string + }{ + // Incremental indexing + {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, + + // Without indexing + {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, + + // Never indexed + {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, + {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendTableSize(t *testing.T) { + tests := []struct { + i uint32 + wantHex string + }{ + // Fits into 1 byte + {30, "3e"}, + + // Extra byte + {31, "3f00"}, + {32, "3f01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendTableSize(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestEncoderSetMaxDynamicTableSize(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + tests := []struct { + v uint32 + wantUpdate bool + wantMinSize uint32 + wantMaxSize uint32 + }{ + // Set new table size to 2048 + {2048, true, 2048, 2048}, + + // Set new table size to 16384, but still limited to + // 4096 + {16384, true, 2048, 4096}, + } + for _, tt := range tests { + e.SetMaxDynamicTableSize(tt.v) + if got := e.tableSizeUpdate; tt.wantUpdate != got { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate) + } + if got := e.minSize; tt.wantMinSize != got { + t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize) + } + if got := e.dynTab.maxSize; tt.wantMaxSize != got { + t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize) + } + } +} + +func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { + e := NewEncoder(nil) + // 4095 < initialHeaderTableSize means maxSize is truncated to + // 4095. + e.SetMaxDynamicTableSizeLimit(4095) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(4095); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } + if got, want := e.tableSizeUpdate, true; got != want { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, want) + } + // maxSize will be truncated to maxSizeLimit + e.SetMaxDynamicTableSize(16384) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + // 8192 > current maxSizeLimit, so maxSize does not change. + e.SetMaxDynamicTableSizeLimit(8192) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(8192); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } +} + +func removeSpace(s string) string { + return strings.Replace(s, " ", "", -1) +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack.go new file mode 100644 index 000000000..c9e36f742 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. 
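Aside: before the decoder sources below, here is a round-trip sketch of the hpack API added above (hypothetical usage for illustration, not part of this diff). It mirrors what TestEncoderWriteField does: encode fields into a buffer, then feed the bytes to a Decoder whose callback emits each field:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/bradfitz/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Encode a small header list; fields matching the static table
	// (e.g. ":method: GET") compress to a single indexed byte.
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: "custom-key", Value: "custom-value"},
	} {
		if err := enc.WriteField(hf); err != nil {
			panic(err)
		}
	}
	fmt.Printf("encoded %d bytes\n", buf.Len())

	// Decode it back; the emit callback fires once per header field.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		panic(err)
	}
	if err := dec.Close(); err != nil {
		panic(err)
	}
}
```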
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+	Err error
+}
+
+func (de DecodingError) Error() string {
+	return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+	return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+	Name, Value string
+
+	// Sensitive means that this header field should never be
+	// indexed.
+	Sensitive bool
+}
+
+func (hf *HeaderField) size() uint32 {
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+	// "The size of the dynamic table is the sum of the size of
+	// its entries. The size of an entry is the sum of its name's
+	// length in octets (as defined in Section 5.2), its value's
+	// length in octets (see Section 5.2), plus 32. The size of
+	// an entry is calculated using the length of the name and
+	// value without any Huffman encoding applied."
+
+	// This can overflow if somebody makes a large HeaderField
+	// Name and/or Value by hand, but we don't care, because that
+	// won't happen on the wire because the encoding doesn't allow
+	// it.
+	return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+	dynTab dynamicTable
+	emit   func(f HeaderField)
+
+	// buf is the unparsed buffer. It's only written to
+	// saveBuf if it was truncated in the middle of a header
+	// block. Because it's usually not owned, we can only
+	// process it under Write.
+	buf     []byte // usually not owned
+	saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder {
+	d := &Decoder{
+		emit: emitFunc,
+	}
+	d.dynTab.allowedMaxSize = maxSize
+	d.dynTab.setMaxSize(maxSize)
+	return d
+}
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+	d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+	d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+	// ents is the FIFO described at
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+	// The newest (low index) is appended at the end, and items are
+	// evicted from the front.
+ ents []HeaderField + size uint32 + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +// TODO: change dynamicTable to be a struct with a slice and a size int field, +// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: +// +// +// Then make add increment the size. maybe the max size should move from Decoder to +// dynamicTable and add should return an ok bool if there was enough space. +// +// Later we'll need a remove operation on dynamicTable. + +func (dt *dynamicTable) add(f HeaderField) { + dt.ents = append(dt.ents, f) + dt.size += f.size() + dt.evict() +} + +// If we're too big, evict old stuff (front of the slice) +func (dt *dynamicTable) evict() { + base := dt.ents // keep base pointer of slice + for dt.size > dt.maxSize { + dt.size -= dt.ents[0].size() + dt.ents = dt.ents[1:] + } + + // Shift slice contents down if we evicted things. + if len(dt.ents) != len(base) { + copy(base, dt.ents) + dt.ents = base[:len(dt.ents)] + } +} + +// constantTimeStringCompare compares string a and b in a constant +// time manner. +func constantTimeStringCompare(a, b string) bool { + if len(a) != len(b) { + return false + } + + c := byte(0) + + for i := 0; i < len(a); i++ { + c |= a[i] ^ b[i] + } + + return c == 0 +} + +// Search searches f in the table. The return value i is 0 if there is +// no name match. If there is name match or name/value match, i is the +// index of that entry (1-based). If both name and value match, +// nameValueMatch becomes true. +func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + l := len(dt.ents) + for j := l - 1; j >= 0; j-- { + ent := dt.ents[j] + if !constantTimeStringCompare(ent.Name, f.Name) { + continue + } + if i == 0 { + i = uint64(l - j) + } + if f.Sensitive { + continue + } + if !constantTimeStringCompare(ent.Value, f.Value) { + continue + } + i = uint64(l - j) + nameValueMatch = true + return + } + return +} + +func (d *Decoder) maxTableIndex() int { + return len(d.dynTab.ents) + len(staticTable) +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + if i < 1 { + return + } + if i > uint64(d.maxTableIndex()) { + return + } + if i <= uint64(len(staticTable)) { + return staticTable[i-1], true + } + dents := d.dynTab.ents + return dents[len(dents)-(int(i)-len(staticTable))], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. 
+	if d.saveBuf.Len() == 0 {
+		d.buf = p
+	} else {
+		d.saveBuf.Write(p)
+		d.buf = d.saveBuf.Bytes()
+		d.saveBuf.Reset()
+	}
+
+	for len(d.buf) > 0 {
+		err = d.parseHeaderFieldRepr()
+		if err != nil {
+			if err == errNeedMore {
+				err = nil
+				d.saveBuf.Write(d.buf)
+			}
+			break
+		}
+	}
+
+	return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+	indexedTrue indexType = iota
+	indexedFalse
+	indexedNever
+)
+
+func (v indexType) indexed() bool   { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+	b := d.buf[0]
+	switch {
+	case b&128 != 0:
+		// Indexed representation.
+		// High bit set?
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+		return d.parseFieldIndexed()
+	case b&192 == 64:
+		// 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b01xxxxxx: top two bits are 01
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+		return d.parseFieldLiteral(6, indexedTrue)
+	case b&240 == 0:
+		// 6.2.2 Literal Header Field without Indexing
+		// 0b0000xxxx: top four bits are 0000
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+		return d.parseFieldLiteral(4, indexedFalse)
+	case b&240 == 16:
+		// 6.2.3 Literal Header Field never Indexed
+		// 0b0001xxxx: top four bits are 0001
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+		return d.parseFieldLiteral(4, indexedNever)
+	case b&224 == 32:
+		// 6.3 Dynamic Table Size Update
+		// Top three bits are '001'.
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.emit(HeaderField{Name: hf.Name, Value: hf.Value})
+	d.buf = buf
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = readString(buf)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = readString(buf)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	d.emit(hf)
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+func readString(p []byte) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		return string(p[:strLen]), p[strLen:], nil
+	}
+
+	// TODO: optimize this garbage:
+	var buf bytes.Buffer
+	if _, err := HuffmanDecode(&buf, p[:strLen]); err != nil {
+		return "", nil, err
+	}
+	return buf.String(), p[strLen:], nil
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go
new file mode 100644
index 000000000..08bee414f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go
@@ -0,0 +1,648 @@
+// Copyright 2014 The Go Authors.
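Aside: the integer representation used by appendVarInt and readVarInt deserves a worked example. The sketch below re-derives the encoding under the same rules (a standalone illustration, not code from this diff): values below 2^n - 1 fit in the n-bit prefix; larger values saturate the prefix, then emit 7-bit groups with a continuation bit. For 1337 with a 5-bit prefix it produces 1f 9a 0a, matching the test vectors in encode_test.go and hpack_test.go:

```go
package main

import "fmt"

// encodeVarInt mirrors appendVarInt: an n-bit prefix, then
// little-endian 7-bit continuation groups for the remainder.
func encodeVarInt(n uint, i uint64) []byte {
	k := (uint64(1) << n) - 1
	if i < k {
		return []byte{byte(i)} // fits entirely in the prefix
	}
	out := []byte{byte(k)} // prefix saturated to all ones
	i -= k
	for i >= 128 {
		out = append(out, byte(0x80|(i&0x7f))) // 7 bits + continuation bit
		i >>= 7
	}
	return append(out, byte(i))
}

func main() {
	// 1337 with a 5-bit prefix: 31 (prefix saturated), then
	// 1306 = 154 (low 7 bits, continuation set) followed by 10.
	fmt.Printf("% x\n", encodeVarInt(5, 1337)) // 1f 9a 0a
}
```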
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package hpack + +import ( + "bufio" + "bytes" + "encoding/hex" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > len(staticTable) { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} + +func (d *Decoder) mustAt(idx int) HeaderField { + if hf, ok := d.at(uint64(idx)); !ok { + panic(fmt.Sprintf("bogus index %d", idx)) + } else { + return hf + } +} + +func TestDynamicTableAt(t *testing.T) { + d := NewDecoder(4096, nil) + at := d.mustAt + if got, want := at(2), (pair(":method", "GET")); got != want { + t.Errorf("at(2) = %v; want %v", got, want) + } + d.dynTab.add(pair("foo", "bar")) + d.dynTab.add(pair("blake", "miz")) + if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 2) = %v; want %v", got, want) + } + if got, want := at(3), 
(pair(":method", "POST")); got != want { + t.Errorf("at(3) = %v; want %v", got, want) + } +} + +func TestDynamicTableSearch(t *testing.T) { + dt := dynamicTable{} + dt.setMaxSize(4096) + + dt.add(pair("foo", "bar")) + dt.add(pair("blake", "miz")) + dt.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), 3, true}, + {pair(":method", "GET"), 1, true}, + + // Only name match because of Sensitive == true + {HeaderField{"blake", "miz", true}, 2, false}, + + // Only Name matches + {pair("foo", "..."), 3, false}, + {pair("blake", "..."), 2, false}, + {pair(":method", "..."), 1, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestDynamicTableSizeEvict(t *testing.T) { + d := NewDecoder(4096, nil) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("size = %d; want %d", d.dynTab.size, want) + } + add := d.dynTab.add + add(pair("blake", "eats pizza")) + if want := uint32(15 + 32); d.dynTab.size != want { + t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) + } + add(pair("foo", "bar")) + if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { + t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) + } + d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) + if want := uint32(6 + 32); d.dynTab.size != want { + t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) + } + if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + add(pair("long", strings.Repeat("x", 500))) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) + } +} + +func TestDecoderDecode(t *testing.T) { + tests := []struct { + name string + in []byte + want []HeaderField + wantDynTab []HeaderField // newest entry first + }{ + // C.2.1 Literal Header Field with Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 + {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), + []HeaderField{pair("custom-key", "custom-header")}, + []HeaderField{pair("custom-key", "custom-header")}, + }, + + // C.2.2 Literal Header Field without Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 + {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), + []HeaderField{pair(":path", "/sample/path")}, + []HeaderField{}}, + + // C.2.3 Literal Header Field never Indexed + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 + {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), + []HeaderField{{"password", "secret", true}}, + []HeaderField{}}, + + // C.2.4 Indexed Header Field + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 + {"C.2.4", []byte("\x82"), + []HeaderField{pair(":method", "GET")}, + []HeaderField{}}, + } + for _, tt := range tests { + d := NewDecoder(4096, nil) + hf, err := d.DecodeFull(tt.in) + if err != nil { + t.Errorf("%s: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(hf, tt.want) { + t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { + t.Errorf("%s: dynamic table after 
= %v; want %v", tt.name, gotDynTab, tt.wantDynTab) + } + } +} + +func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { + hf = make([]HeaderField, len(dt.ents)) + for i := range hf { + hf[i] = dt.ents[len(dt.ents)-1-i] + } + return +} + +type encAndWant struct { + enc []byte + want []HeaderField + wantDynTab []HeaderField + wantDynSize uint32 +} + +// C.3 Request Examples without Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 +func TestDecodeC3_NoHuffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// C.4 Request Examples with Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 +func TestDecodeC4_Huffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5886 a8eb 1064 9cbf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 +// "This section shows several consecutive header lists, corresponding +// to HTTP responses, on the same connection. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur." 
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4803 3330 3258 0770 7269 7661 7465 611d +4d6f 6e2c 2032 3120 4f63 7420 3230 3133 +2032 303a 3133 3a32 3120 474d 546e 1768 +7474 7073 3a2f 2f77 7777 2e65 7861 6d70 +6c65 2e63 6f6d +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4803 3330 37c1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 +3230 3133 2032 303a 3133 3a32 3220 474d +54c0 5a04 677a 6970 7738 666f 6f3d 4153 +444a 4b48 514b 425a 584f 5157 454f 5049 +5541 5851 5745 4f49 553b 206d 6178 2d61 +6765 3d33 3630 303b 2076 6572 7369 6f6e +3d31 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 +// "This section shows the same examples as the previous section, but +// using Huffman encoding for the literal values. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur. The eviction mechanism +// uses the length of the decoded literal values, so the same +// evictions occurs as in the previous section." 
+func TestDecodeC6_ResponsesHuffman(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4882 6402 5885 aec3 771a 4b61 96d0 7abe +9410 54d4 44a8 2005 9504 0b81 66e0 82a6 +2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 +e9ae 82ae 43d3 +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4883 640e ffc1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 6196 d07a be94 1054 d444 a820 0595 +040b 8166 e084 a62d 1bff c05a 839b d9ab +77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b +3960 d5af 2708 7f36 72c1 ab27 0fb5 291f +9587 3160 65c0 03ed 4ee5 b106 3d50 07 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { + d := NewDecoder(size, nil) + for i, step := range steps { + hf, err := d.DecodeFull(step.enc) + if err != nil { + t.Fatalf("Error at step index %d: %v", i, err) + } + if !reflect.DeepEqual(hf, step.want) { + t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { + t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) + } + if d.dynTab.size != step.wantDynSize { + t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) + } + } +} + +func TestHuffmanDecode(t *testing.T) { + tests := []struct { + inHex, want string + }{ + {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"}, + {"a8eb 1064 9cbf", "no-cache"}, + {"25a8 49e9 5ba9 7d7f", "custom-key"}, + {"25a8 49e9 5bb8 e8b4 bf", "custom-value"}, + {"6402", "302"}, + {"aec3 771a 4b", "private"}, + {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"}, + {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"}, + {"9bd9 ab", "gzip"}, + {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07", + "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"}, + } + for i, tt := range tests { + var buf bytes.Buffer + in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1)) + if err != nil { + t.Errorf("%d. hex input error: %v", i, err) + continue + } + if _, err := HuffmanDecode(&buf, in); err != nil { + t.Errorf("%d. decode error: %v", i, err) + continue + } + if got := buf.String(); tt.want != got { + t.Errorf("%d. 
decode = %q; want %q", i, got, tt.want) + } + } +} + +func TestAppendHuffmanString(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + {"no-cache", "a8eb 1064 9cbf"}, + {"custom-key", "25a8 49e9 5ba9 7d7f"}, + {"custom-value", "25a8 49e9 5bb8 e8b4 bf"}, + {"302", "6402"}, + {"private", "aec3 771a 4b"}, + {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"}, + {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"}, + {"gzip", "9bd9 ab"}, + {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", + "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"}, + } + for i, tt := range tests { + buf := []byte{} + want := strings.Replace(tt.want, " ", "", -1) + buf = AppendHuffmanString(buf, tt.in) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("%d. encode = %q; want %q", i, got, want) + } + } +} + +func TestReadVarInt(t *testing.T) { + type res struct { + i uint64 + consumed int + err error + } + tests := []struct { + n byte + p []byte + want res + }{ + // Fits in a byte: + {1, []byte{0}, res{0, 1, nil}}, + {2, []byte{2}, res{2, 1, nil}}, + {3, []byte{6}, res{6, 1, nil}}, + {4, []byte{14}, res{14, 1, nil}}, + {5, []byte{30}, res{30, 1, nil}}, + {6, []byte{62}, res{62, 1, nil}}, + {7, []byte{126}, res{126, 1, nil}}, + {8, []byte{254}, res{254, 1, nil}}, + + // Doesn't fit in a byte: + {1, []byte{1}, res{0, 0, errNeedMore}}, + {2, []byte{3}, res{0, 0, errNeedMore}}, + {3, []byte{7}, res{0, 0, errNeedMore}}, + {4, []byte{15}, res{0, 0, errNeedMore}}, + {5, []byte{31}, res{0, 0, errNeedMore}}, + {6, []byte{63}, res{0, 0, errNeedMore}}, + {7, []byte{127}, res{0, 0, errNeedMore}}, + {8, []byte{255}, res{0, 0, errNeedMore}}, + + // Ignoring top bits: + {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 + {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 + {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 + + // Extra byte: + {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte + + // Short a byte: + {5, []byte{191, 154}, res{0, 0, errNeedMore}}, + + // integer overflow: + {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, + } + for _, tt := range tests { + i, remain, err := readVarInt(tt.n, tt.p) + consumed := len(tt.p) - len(remain) + got := res{i, consumed, err} + if got != tt.want { + t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) + } + } +} + +func dehex(s string) []byte { + s = strings.Replace(s, " ", "", -1) + s = strings.Replace(s, "\n", "", -1) + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/huffman.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/huffman.go new file mode 100644 index 000000000..9fe76f68e --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/huffman.go @@ -0,0 +1,159 @@ +// Copyright 2014 The Go Authors. 
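Aside: the exported Huffman helpers round-trip cleanly; this sketch (hypothetical usage for illustration, not part of this diff) reproduces the "www.example.com" vector from the tests above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/bradfitz/http2/hpack"
)

func main() {
	// Huffman-encode a string with the static HPACK code table...
	enc := hpack.AppendHuffmanString(nil, "www.example.com")
	fmt.Printf("% x\n", enc) // f1 e3 c2 e5 f2 3a 6b a0 ab 90 f4 ff, per the tests

	// ...and decode it back.
	var buf bytes.Buffer
	if _, err := hpack.HuffmanDecode(&buf, enc); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // www.example.com
}
```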
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+	"bytes"
+	"io"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+
+	n := rootHuffmanNode
+	cur, nbits := uint(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		nbits += 8
+		for nbits >= 8 {
+			n = n.children[byte(cur>>(nbits-8))]
+			if n.children == nil {
+				buf.WriteByte(n.sym)
+				nbits -= n.codeLen
+				n = rootHuffmanNode
+			} else {
+				nbits -= 8
+			}
+		}
+	}
+	for nbits > 0 {
+		n = n.children[byte(cur<<(8-nbits))]
+		if n.children != nil || n.codeLen > nbits {
+			break
+		}
+		buf.WriteByte(n.sym)
+		nbits -= n.codeLen
+		n = rootHuffmanNode
+	}
+	return w.Write(buf.Bytes())
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	for i, code := range huffmanCodes {
+		if i > 255 {
+			panic("too many huffman codes")
+		}
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), 1<<shift
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to the next byte
+// boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends the Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/tables.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/tables.go
new file mode 100644
index 000000000..f898e2512
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/tables.go
@@ -0,0 +1,353 @@
+// Copyright 2014 The Go Authors.
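Aside: tables.go below defines the 1-based static table that indexed representations resolve against, with dynamic entries addressed after the static ones. A small sketch of how an Indexed Header Field byte resolves through the Decoder added earlier (hypothetical usage for illustration, not part of this diff; the input matches test case C.2.4 above):

```go
package main

import (
	"fmt"

	"github.com/bradfitz/http2/hpack"
)

func main() {
	d := hpack.NewDecoder(4096, nil)

	// 0x82 is an Indexed Header Field: high bit set, 7-bit index 2,
	// which is static table entry ":method: GET" (indices are 1-based).
	hfs, err := d.DecodeFull([]byte{0x82})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", hfs) // [{Name::method Value:GET Sensitive:false}]
}
```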
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package hpack + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = []HeaderField{ + pair(":authority", ""), // index 1 (1-based) + pair(":method", "GET"), + pair(":method", "POST"), + pair(":path", "/"), + pair(":path", "/index.html"), + pair(":scheme", "http"), + pair(":scheme", "https"), + pair(":status", "200"), + pair(":status", "204"), + pair(":status", "206"), + pair(":status", "304"), + pair(":status", "400"), + pair(":status", "404"), + pair(":status", "500"), + pair("accept-charset", ""), + pair("accept-encoding", "gzip, deflate"), + pair("accept-language", ""), + pair("accept-ranges", ""), + pair("accept", ""), + pair("access-control-allow-origin", ""), + pair("age", ""), + pair("allow", ""), + pair("authorization", ""), + pair("cache-control", ""), + pair("content-disposition", ""), + pair("content-encoding", ""), + pair("content-language", ""), + pair("content-length", ""), + pair("content-location", ""), + pair("content-range", ""), + pair("content-type", ""), + pair("cookie", ""), + pair("date", ""), + pair("etag", ""), + pair("expect", ""), + pair("expires", ""), + pair("from", ""), + pair("host", ""), + pair("if-match", ""), + pair("if-modified-since", ""), + pair("if-none-match", ""), + pair("if-range", ""), + pair("if-unmodified-since", ""), + pair("last-modified", ""), + pair("link", ""), + pair("location", ""), + pair("max-forwards", ""), + pair("proxy-authenticate", ""), + pair("proxy-authorization", ""), + pair("range", ""), + pair("referer", ""), + pair("refresh", ""), + pair("retry-after", ""), + pair("server", ""), + pair("set-cookie", ""), + pair("strict-transport-security", ""), + pair("transfer-encoding", ""), + pair("user-agent", ""), + pair("vary", ""), + pair("via", ""), + pair("www-authenticate", ""), +} + +var huffmanCodes = []uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 
0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = []uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/http2.go b/Godeps/_workspace/src/github.com/bradfitz/http2/http2.go new file mode 100644 index 000000000..35f9b26e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/http2.go @@ -0,0 +1,249 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +// Package http2 implements the HTTP/2 protocol. +// +// This is a work in progress. This package is low-level and intended +// to be used directly by very few people. Most users will use it +// indirectly through integration with the net/http package. See +// ConfigureServer. That ConfigureServer call will likely be automatic +// or available via an empty import in the future. 
+// +// See http://http2.github.io/ +package http2 + +import ( + "bufio" + "fmt" + "io" + "net/http" + "strconv" + "sync" +) + +var VerboseLogs = false + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateResvLocal + stateResvRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateResvLocal: "ResvLocal", + stateResvRemote: "ResvRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. +func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +func validHeader(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + // "Just as in HTTP/1.x, header field names are + // strings of ASCII characters that are compared in a + // case-insensitive fashion. However, header field + // names MUST be converted to lowercase prior to their + // encoding in HTTP/2. 
" + if r >= 127 || ('A' <= r && r <= 'Z') { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. +type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + // TODO: pick something better? this is a bit under + // (3 x typical 1500 byte MTU) at least. + return bufio.NewWriterSize(nil, 4<<10) + }, +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go new file mode 100644 index 000000000..6a1b7e836 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go @@ -0,0 +1,152 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"net/http"
+	"os/exec"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/bradfitz/http2/hpack"
+)
+
+var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
+
+func condSkipFailingTest(t *testing.T) {
+	if !*knownFailing {
+		t.Skip("Skipping known-failing test without --known_failing")
+	}
+}
+
+func init() {
+	DebugGoroutines = true
+	flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging")
+}
+
+func TestSettingString(t *testing.T) {
+	tests := []struct {
+		s    Setting
+		want string
+	}{
+		{Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
+		{Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
+	}
+	for i, tt := range tests {
+		got := fmt.Sprint(tt.s)
+		if got != tt.want {
+			t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
+		}
+	}
+}
+
+type twriter struct {
+	t  testing.TB
+	st *serverTester // optional
+}
+
+func (w twriter) Write(p []byte) (n int, err error) {
+	if w.st != nil {
+		ps := string(p)
+		for _, phrase := range w.st.logFilter {
+			if strings.Contains(ps, phrase) {
+				return len(p), nil // no logging
+			}
+		}
+	}
+	w.t.Logf("%s", p)
+	return len(p), nil
+}
+
+// like encodeHeader, but doesn't add implicit pseudo headers.
+func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
+	var buf bytes.Buffer
+	enc := hpack.NewEncoder(&buf)
+	for len(headers) > 0 {
+		k, v := headers[0], headers[1]
+		headers = headers[2:]
+		if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
+			t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+		}
+	}
+	return buf.Bytes()
+}
+
+// Verify that curl has HTTP/2 support.
+func requireCurl(t *testing.T) { + out, err := dockerLogs(curl(t, "--version")) + if err != nil { + t.Skipf("failed to determine curl features; skipping test") + } + if !strings.Contains(string(out), "HTTP2") { + t.Skip("curl doesn't support HTTP2; skipping test") + } +} + +func curl(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).CombinedOutput() + if err != nil { + t.Skipf("Failed to run curl in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +type puppetCommand struct { + fn func(w http.ResponseWriter, r *http.Request) + done chan<- bool +} + +type handlerPuppet struct { + ch chan puppetCommand +} + +func newHandlerPuppet() *handlerPuppet { + return &handlerPuppet{ + ch: make(chan puppetCommand), + } +} + +func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { + for cmd := range p.ch { + cmd.fn(w, r) + cmd.done <- true + } +} + +func (p *handlerPuppet) done() { close(p.ch) } +func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { + done := make(chan bool) + p.ch <- puppetCommand{fn, done} + <-done +} +func dockerLogs(container string) ([]byte, error) { + out, err := exec.Command("docker", "wait", container).CombinedOutput() + if err != nil { + return out, err + } + exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + return out, errors.New("unexpected exit status from docker wait") + } + out, err = exec.Command("docker", "logs", container).CombinedOutput() + exec.Command("docker", "rm", container).Run() + if err == nil && exitStatus != 0 { + err = fmt.Errorf("exit status %d: %s", exitStatus, out) + } + return out, err +} + +func kill(container string) { + exec.Command("docker", "kill", container).Run() + exec.Command("docker", "rm", container).Run() +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/pipe.go b/Godeps/_workspace/src/github.com/bradfitz/http2/pipe.go new file mode 100644 index 000000000..ce9aad533 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/pipe.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import ( + "sync" +) + +type pipe struct { + b buffer + c sync.Cond + m sync.Mutex +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (r *pipe) Read(p []byte) (n int, err error) { + r.c.L.Lock() + defer r.c.L.Unlock() + for r.b.Len() == 0 && !r.b.closed { + r.c.Wait() + } + return r.b.Read(p) +} + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (w *pipe) Write(p []byte) (n int, err error) { + w.c.L.Lock() + defer w.c.L.Unlock() + defer w.c.Signal() + return w.b.Write(p) +} + +func (c *pipe) Close(err error) { + c.c.L.Lock() + defer c.c.L.Unlock() + defer c.c.Signal() + c.b.Close(err) +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go new file mode 100644 index 000000000..10c4c327c --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go @@ -0,0 +1,24 @@ +// Copyright 2014 The Go Authors. 
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import ( + "errors" + "testing" +) + +func TestPipeClose(t *testing.T) { + var p pipe + p.c.L = &p.m + a := errors.New("a") + b := errors.New("b") + p.Close(a) + p.Close(b) + _, err := p.Read(make([]byte, 1)) + if err != a { + t.Errorf("err = %v want %v", err, a) + } +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go new file mode 100644 index 000000000..d9648fd32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go @@ -0,0 +1,121 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import ( + "testing" +) + +func TestPriority(t *testing.T) { + // A -> B + // move A's parent to B + streams := make(map[uint32]*stream) + a := &stream{ + parent: nil, + weight: 16, + } + streams[1] = a + b := &stream{ + parent: a, + weight: 16, + } + streams[2] = b + adjustStreamPriority(streams, 1, PriorityParam{ + Weight: 20, + StreamDep: 2, + }) + if a.parent != b { + t.Errorf("Expected A's parent to be B") + } + if a.weight != 20 { + t.Errorf("Expected A's weight to be 20; got %d", a.weight) + } + if b.parent != nil { + t.Errorf("Expected B to have no parent") + } + if b.weight != 16 { + t.Errorf("Expected B's weight to be 16; got %d", b.weight) + } +} + +func TestPriorityExclusiveZero(t *testing.T) { + // A B and C are all children of the 0 stream. 
+	// Exclusive reprioritization to any of the streams
+	// should bring the rest of the streams under the
+	// reprioritized stream.
+	streams := make(map[uint32]*stream)
+	a := &stream{
+		parent: nil,
+		weight: 16,
+	}
+	streams[1] = a
+	b := &stream{
+		parent: nil,
+		weight: 16,
+	}
+	streams[2] = b
+	c := &stream{
+		parent: nil,
+		weight: 16,
+	}
+	streams[3] = c
+	adjustStreamPriority(streams, 3, PriorityParam{
+		Weight:    20,
+		StreamDep: 0,
+		Exclusive: true,
+	})
+	if a.parent != c {
+		t.Errorf("Expected A's parent to be C")
+	}
+	if a.weight != 16 {
+		t.Errorf("Expected A's weight to be 16; got %d", a.weight)
+	}
+	if b.parent != c {
+		t.Errorf("Expected B's parent to be C")
+	}
+	if b.weight != 16 {
+		t.Errorf("Expected B's weight to be 16; got %d", b.weight)
+	}
+	if c.parent != nil {
+		t.Errorf("Expected C to have no parent")
+	}
+	if c.weight != 20 {
+		t.Errorf("Expected C's weight to be 20; got %d", c.weight)
+	}
+}
+
+func TestPriorityOwnParent(t *testing.T) {
+	streams := make(map[uint32]*stream)
+	a := &stream{
+		parent: nil,
+		weight: 16,
+	}
+	streams[1] = a
+	b := &stream{
+		parent: a,
+		weight: 16,
+	}
+	streams[2] = b
+	adjustStreamPriority(streams, 1, PriorityParam{
+		Weight:    20,
+		StreamDep: 1,
+	})
+	if a.parent != nil {
+		t.Errorf("Expected A's parent to be nil")
+	}
+	if a.weight != 20 {
+		t.Errorf("Expected A's weight to be 20; got %d", a.weight)
+	}
+	if b.parent != a {
+		t.Errorf("Expected B's parent to be A")
+	}
+	if b.weight != 16 {
+		t.Errorf("Expected B's weight to be 16; got %d", b.weight)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/server.go b/Godeps/_workspace/src/github.com/bradfitz/http2/server.go
new file mode 100644
index 000000000..aa0e7bdc8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/server.go
@@ -0,0 +1,1780 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: finish GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during a shutdown race.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable? or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/bradfitz/http2/hpack"
+)
+
+const (
+	prefaceTimeout        = 10 * time.Second
+	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
+	handlerChunkWriteSize = 4 << 10
+	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+	errClientDisconnected = errors.New("client disconnected")
+	errClosedBody         = errors.New("body closed by handler")
+	errStreamBroken       = errors.New("http2: stream broken")
+)
+
+var responseWriterStatePool = sync.Pool{
+	New: func() interface{} {
+		rws := &responseWriterState{}
+		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+		return rws
+	},
+}
+
+// Test hooks.
+var (
+	testHookOnConn        func()
+	testHookGetServerConn func(*serverConn)
+	testHookOnPanicMu     *sync.Mutex // nil except in tests
+	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+	// which may run at a time over all connections.
+	// Negative or zero means no limit.
+	// TODO: implement
+	MaxHandlers int
+
+	// MaxConcurrentStreams optionally specifies the number of
+	// concurrent streams that each client may have open at a
+	// time. This is unrelated to the number of http.Handler goroutines
+	// which may be active globally, which is MaxHandlers.
+	// If zero, MaxConcurrentStreams defaults to at least 100, per
+	// the HTTP/2 spec's recommendations.
+	MaxConcurrentStreams uint32
+
+	// MaxReadFrameSize optionally specifies the largest frame
+	// this server is willing to read. A valid value is between
+	// 16k and 16M, inclusive. If zero or otherwise invalid, a
+	// default value is used.
+	MaxReadFrameSize uint32
+
+	// PermitProhibitedCipherSuites, if true, permits the use of
+	// cipher suites prohibited by the HTTP/2 spec.
+	PermitProhibitedCipherSuites bool
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+		return v
+	}
+	return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+	if v := s.MaxConcurrentStreams; v > 0 {
+		return v
+	}
+	return defaultMaxStreams
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) {
+	if conf == nil {
+		conf = new(Server)
+	}
+	if s.TLSConfig == nil {
+		s.TLSConfig = new(tls.Config)
+	}
+
+	// Note: not setting MinVersion to tls.VersionTLS12,
+	// as we don't want to interfere with HTTP/1.1 traffic
+	// on the user's server. We enforce TLS 1.2 later once
+	// we accept a connection. Ideally this should be done
+	// during next-proto selection, but using TLS <1.2 with
+	// HTTP/2 is still the client's bug.
+
+	// Be sure we advertise tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+	// at least.
+	// TODO: enable PreferServerCipherSuites?
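+	//
+	// A minimal usage sketch (editorial note, assuming this vendored
+	// package; mux, "cert.pem" and "key.pem" are placeholders, not
+	// part of this package):
+	//
+	//	srv := &http.Server{Addr: ":4430", Handler: mux}
+	//	http2.ConfigureServer(srv, &http2.Server{})
+	//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
+	//
+	// Below: only an explicitly supplied CipherSuites list needs the
+	// required suite appended; a nil list means crypto/tls's defaults,
+	// which already include it.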
+	if s.TLSConfig.CipherSuites != nil {
+		const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+		haveRequired := false
+		for _, v := range s.TLSConfig.CipherSuites {
+			if v == requiredCipher {
+				haveRequired = true
+				break
+			}
+		}
+		if !haveRequired {
+			s.TLSConfig.CipherSuites = append(s.TLSConfig.CipherSuites, requiredCipher)
+		}
+	}
+
+	haveNPN := false
+	for _, p := range s.TLSConfig.NextProtos {
+		if p == NextProtoTLS {
+			haveNPN = true
+			break
+		}
+	}
+	if !haveNPN {
+		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+	}
+	// h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
+	// to switch to "h2".
+	s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
+
+	if s.TLSNextProto == nil {
+		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+	}
+	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+		if testHookOnConn != nil {
+			testHookOnConn()
+		}
+		conf.handleConn(hs, c, h)
+	}
+	s.TLSNextProto[NextProtoTLS] = protoHandler
+	s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
+}
+
+func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
+	sc := &serverConn{
+		srv:              srv,
+		hs:               hs,
+		conn:             c,
+		remoteAddrStr:    c.RemoteAddr().String(),
+		bw:               newBufferedWriter(c),
+		handler:          h,
+		streams:          make(map[uint32]*stream),
+		readFrameCh:      make(chan frameAndGate),
+		readFrameErrCh:   make(chan error, 1), // must be buffered for 1
+		wantWriteFrameCh: make(chan frameWriteMsg, 8),
+		wroteFrameCh:     make(chan struct{}, 1), // buffered; one send in reading goroutine
+		bodyReadCh:       make(chan bodyReadMsg), // buffering doesn't matter either way
+		doneServing:      make(chan struct{}),
+		advMaxStreams:    srv.maxConcurrentStreams(),
+		writeSched: writeScheduler{
+			maxFrameSize: initialMaxFrameSize,
+		},
+		initialWindowSize: initialWindowSize,
+		headerTableSize:   initialHeaderTableSize,
+		serveG:            newGoroutineLock(),
+		pushEnabled:       true,
+	}
+	sc.flow.add(initialWindowSize)
+	sc.inflow.add(initialWindowSize)
+	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+	sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField)
+
+	fr := NewFramer(sc.bw, c)
+	fr.SetMaxReadFrameSize(srv.maxReadFrameSize())
+	sc.framer = fr
+
+	if tc, ok := c.(*tls.Conn); ok {
+		sc.tlsState = new(tls.ConnectionState)
+		*sc.tlsState = tc.ConnectionState()
+		// 9.2 Use of TLS Features
+		// An implementation of HTTP/2 over TLS MUST use TLS
+		// 1.2 or higher with the restrictions on feature set
+		// and cipher suite described in this section. Due to
+		// implementation limitations, it might not be
+		// possible to fail TLS negotiation. An endpoint MUST
+		// immediately terminate an HTTP/2 connection that
+		// does not meet the TLS requirements described in
+		// this section with a connection error (Section
+		// 5.4.1) of type INADEQUATE_SECURITY.
+		if sc.tlsState.Version < tls.VersionTLS12 {
+			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+			return
+		}
+
+		if sc.tlsState.ServerName == "" {
+			// Client must use SNI, but we don't enforce that anymore,
+			// since it was causing problems when connecting to bare IP
+			// addresses during development.
+			//
+			// TODO: optionally enforce? Or enforce at the time we receive
+			// a new request, and verify that the ServerName matches the :authority?
+			// But that precludes proxy situations, perhaps.
+			//
+			// So for now, do nothing here again.
+ } + + if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + log.Printf("REJECTING conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +// frameAndGates coordinates the readFrames and serve +// goroutines. Because the Framer interface only permits the most +// recently-read Frame from being accessed, the readFrames goroutine +// blocks until it has a frame, passes it to serve, and then waits for +// serve to be done with it before reading the next one. +type frameAndGate struct { + f Frame + g gate +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + framer *Framer + hpackDecoder *hpack.Decoder + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan frameAndGate // written by serverConn.readFrames + readFrameErrCh chan error + wantWriteFrameCh chan frameWriteMsg // from handlers -> serve + wroteFrameCh chan struct{} // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + testHookCh chan func() // code to run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+	clientMaxStreams  uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+	advMaxStreams     uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+	curOpenStreams    uint32 // client's number of open streams
+	maxStreamID       uint32 // max ever seen
+	streams           map[uint32]*stream
+	initialWindowSize int32
+	headerTableSize   uint32
+	maxHeaderListSize uint32            // zero means unknown (default)
+	canonHeader       map[string]string // http2-lower-case -> Go-Canonical-Case
+	req               requestParam      // non-zero while reading request headers
+	writingFrame      bool              // started write goroutine but haven't heard back on wroteFrameCh
+	needsFrameFlush   bool              // last frame write wasn't a flush
+	writeSched        writeScheduler
+	inGoAway          bool             // we've started to or sent GOAWAY
+	needToSendGoAway  bool             // we need to schedule a GOAWAY frame write
+	goAwayCode        ErrCode
+	shutdownTimerCh   <-chan time.Time // nil until used
+	shutdownTimer     *time.Timer      // nil until used
+
+	// Owned by the writeFrameAsync goroutine:
+	headerWriteBuf bytes.Buffer
+	hpackEncoder   *hpack.Encoder
+}
+
+// requestParam is the state of the next request, initialized over
+// potentially several frames HEADERS + zero or more CONTINUATION
+// frames.
+type requestParam struct {
+	// stream is non-nil if we're reading (HEADER or CONTINUATION)
+	// frames for a request (but not DATA).
+	stream            *stream
+	header            http.Header
+	method, path      string
+	scheme, authority string
+	sawRegularHeader  bool // saw a non-pseudo header already
+	invalidHeader     bool // an invalid header was seen
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+	// immutable:
+	id   uint32
+	body *pipe       // non-nil if expecting DATA frames
+	cw   closeWaiter // closed when the stream transitions to the closed state
+
+	// owned by serverConn's serve loop:
+	bodyBytes     int64   // body bytes seen so far
+	declBodyBytes int64   // or -1 if undeclared
+	flow          flow    // limits writing from Handler to client
+	inflow        flow    // what the client is allowed to POST/etc to us
+	parent        *stream // or nil
+	weight        uint8
+	state         streamState
+	sentReset     bool // only true once detached from streams map
+	gotReset      bool // only true once detached from streams map
+}
+
+func (sc *serverConn) Framer() *Framer  { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+	return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+	sc.serveG.check()
+	// http://http2.github.io/http2-spec/#rfc.section.5.1
+	if st, ok := sc.streams[streamID]; ok {
+		return st.state, st
+	}
+	// "The first use of a new stream identifier implicitly closes all
+	// streams in the "idle" state that might have been initiated by
+	// that peer with a lower-valued stream identifier. For example, if
+	// a client sends a HEADERS frame on stream 7 without ever sending a
+	// frame on stream 5, then stream 5 transitions to the "closed"
+	// state when the first frame for stream 7 is sent or received."
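+	// Hence any stream ID we no longer track that is at or below the
+	// highest ID seen so far must already be implicitly closed.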
+ if streamID <= sc.maxStreamID { + return stateClosed, nil + } + return stateIdle, nil +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + str := err.Error() + if err == io.EOF || strings.Contains(str, "use of closed network connection") { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) { + sc.serveG.check() + sc.vlogf("got header field %+v", f) + switch { + case !validHeader(f.Name): + sc.req.invalidHeader = true + case strings.HasPrefix(f.Name, ":"): + if sc.req.sawRegularHeader { + sc.logf("pseudo-header after regular header") + sc.req.invalidHeader = true + return + } + var dst *string + switch f.Name { + case ":method": + dst = &sc.req.method + case ":path": + dst = &sc.req.path + case ":scheme": + dst = &sc.req.scheme + case ":authority": + dst = &sc.req.authority + default: + // 8.1.2.1 Pseudo-Header Fields + // "Endpoints MUST treat a request or response + // that contains undefined or invalid + // pseudo-header fields as malformed (Section + // 8.1.2.6)." + sc.logf("invalid pseudo-header %q", f.Name) + sc.req.invalidHeader = true + return + } + if *dst != "" { + sc.logf("duplicate pseudo-header %q sent", f.Name) + sc.req.invalidHeader = true + return + } + *dst = f.Value + case f.Name == "cookie": + sc.req.sawRegularHeader = true + if s, ok := sc.req.header["Cookie"]; ok && len(s) == 1 { + s[0] = s[0] + "; " + f.Value + } else { + sc.req.header.Add("Cookie", f.Value) + } + default: + sc.req.sawRegularHeader = true + sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +// readFrames is the loop that reads incoming frames. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + g := make(gate, 1) + for { + f, err := sc.framer.ReadFrame() + if err != nil { + sc.readFrameErrCh <- err + close(sc.readFrameCh) + return + } + sc.readFrameCh <- frameAndGate{f, g} + // We can't read another frame until this one is + // processed, as the ReadFrame interface doesn't copy + // memory. The Frame accessor methods access the last + // frame's (shared) buffer. So we wait for the + // serve goroutine to tell us it's done: + g.Wait() + } +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. 
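+// It reports completion to the serve loop on wroteFrameCh (buffered,
+// capacity 1), which prompts the next scheduleFrameWrite.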
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { + err := wm.write.writeFrame(sc) + if ch := wm.done; ch != nil { + select { + case ch <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) + } + } + sc.wroteFrameCh <- struct{}{} // tickle frame selection scheduler +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + + sc.writeFrame(frameWriteMsg{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + + // TODO: more actual settings, notably + // SettingInitialWindowSize, but then we also + // want to bump up the conn window size the + // same amount here right after the settings + }, + }) + sc.unackedSettings++ + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.NewTimer(firstSettingsTimeout) + for { + select { + case wm := <-sc.wantWriteFrameCh: + sc.writeFrame(wm) + case <-sc.wroteFrameCh: + if sc.writingFrame != true { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.scheduleFrameWrite() + case fg, ok := <-sc.readFrameCh: + if !ok { + sc.readFrameCh = nil + } + if !sc.processFrameFromReader(fg, ok) { + return + } + if settingsTimer.C != nil { + settingsTimer.Stop() + settingsTimer.C = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case <-settingsTimer.C: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case <-sc.shutdownTimerCh: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case fn := <-sc.testHookCh: + fn() + } + } +} + +// readPreface reads the ClientPreface greeting from the peer +// or returns an error on timeout or an invalid greeting. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errors.New("timeout waiting for client preface") + case err := <-errc: + if err == nil { + sc.vlogf("client %v said hello", sc.conn.RemoteAddr()) + } + return err + } +} + +// writeDataFromHandler writes the data described in req to stream.id. 
+//
+// The provided ch is used to avoid allocating new channels for each
+// write operation. It's expected that the caller reuses writeData and ch
+// over time.
+//
+// The flow control currently happens in the Handler where it waits
+// for 1 or more bytes to be available to then write here. So at this
+// point we know that we have flow control. But this might have to
+// change when priority is implemented, so the serve goroutine knows
+// the total amount of bytes waiting to be sent and can have more
+// scheduling decisions available.
+func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData, ch chan error) error {
+	sc.writeFrameFromHandler(frameWriteMsg{
+		write:  writeData,
+		stream: stream,
+		done:   ch,
+	})
+	select {
+	case err := <-ch:
+		return err
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-stream.cw:
+		return errStreamBroken
+	}
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) {
+	sc.serveG.checkNotOn() // NOT
+	select {
+	case sc.wantWriteFrameCh <- wm:
+	case <-sc.doneServing:
+		// Client has closed their connection to the server.
+	}
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+	sc.serveG.check()
+	sc.writeSched.add(wm)
+	sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wm.
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+	sc.serveG.check()
+	if sc.writingFrame {
+		panic("internal error: can only be writing one frame at a time")
+	}
+	sc.writingFrame = true
+
+	st := wm.stream
+	if st != nil {
+		switch st.state {
+		case stateHalfClosedLocal:
+			panic("internal error: attempt to send frame on half-closed-local stream")
+		case stateClosed:
+			if st.sentReset || st.gotReset {
+				// Skip this frame. But fake the frame write to reschedule:
+				sc.wroteFrameCh <- struct{}{}
+				return
+			}
+			panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+		}
+	}
+
+	sc.needsFrameFlush = true
+	if endsStream(wm.write) {
+		if st == nil {
+			panic("internal error: expecting non-nil stream")
+		}
+		switch st.state {
+		case stateOpen:
+			// Here we would go to stateHalfClosedLocal in
+			// theory, but since our handler is done and
+			// the net/http package provides no mechanism
+			// for finishing writing to a ResponseWriter
+			// while still reading data (see possible TODO
+			// at top of this file), we go into closed
+			// state here anyway, after telling the peer
+			// we're hanging up on them.
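+			// The RST_STREAM with ErrCodeCancel below is that hang-up.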
+ st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream + errCancel := StreamError{st.id, ErrCodeCancel} + sc.resetStream(errCancel) + case stateHalfClosedRemote: + sc.closeStream(st, nil) + } + } + go sc.writeFrameAsync(wm) +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame { + return + } + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(frameWriteMsg{ + write: &writeGoAway{ + maxStreamID: sc.maxStreamID, + code: sc.goAwayCode, + }, + }) + return + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) + return + } + if !sc.inGoAway { + if wm, ok := sc.writeSched.take(); ok { + sc.startFrameWrite(wm) + return + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + return + } +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + if code != ErrCodeNo { + sc.shutDownIn(250 * time.Millisecond) + } else { + // TODO: configurable + sc.shutDownIn(1 * time.Second) + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.NewTimer(d) + sc.shutdownTimerCh = sc.shutdownTimer.C +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(frameWriteMsg{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.sentReset = true + sc.closeStream(st, se) + } +} + +// curHeaderStreamID returns the stream ID of the header block we're +// currently in the middle of reading. If this returns non-zero, the +// next frame must be a CONTINUATION with this stream id. +func (sc *serverConn) curHeaderStreamID() uint32 { + sc.serveG.check() + st := sc.req.stream + if st == nil { + return 0 + } + return st.id +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool { + sc.serveG.check() + var clientGone bool + var err error + if !fgValid { + err = <-sc.readFrameErrCh + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone = err == io.EOF || strings.Contains(err.Error(), "use of closed network connection") + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. 
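+			// The peer is gone; no GOAWAY could reach them, so
+			// just tear the connection down.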
+ return false + } + } + + if fgValid { + f := fg.f + sc.vlogf("got %v: %#v", f.Header(), f) + err = sc.processFrame(f) + fg.g.Done() // unblock the readFrames goroutine + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("%v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if !fgValid { + sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("disconnection due to other error: %v", err) + } + } + return false +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + if s := sc.curHeaderStreamID(); s != 0 { + if cf, ok := f.(*ContinuationFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } else if cf.Header().StreamID != s { + return ConnectionError(ErrCodeProtocol) + } + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *HeadersFrame: + return sc.processHeaders(f) + case *ContinuationFrame: + return sc.processContinuation(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + log.Printf("Ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.Flags.Has(FlagSettingsAck) { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + st := sc.streams[f.StreamID] + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." 
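+			// So ignore the update for this unknown (perhaps
+			// just-closed) stream rather than treating it as
+			// an error.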
+ return nil + } + if !st.flow.add(int32(f.Increment)) { + return StreamError{f.StreamID, ErrCodeFlowControl} + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.gotReset = true + sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + sc.curOpenStreams-- + delete(sc.streams, st.id) + if p := st.body; p != nil { + p.Close(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.forgetStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + sc.vlogf("processing setting %v", s) + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.writeSched.maxFrameSize = s.Val + case SettingMaxHeaderListSize: + sc.maxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." 
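+	// For example, if the client lowers the setting from the initial
+	// 65535 to 16384, growth below is -49151 and every stream's send
+	// window shrinks by that amount, possibly going negative.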
+ old := sc.initialWindowSize + sc.initialWindowSize = int32(val) + growth := sc.initialWindowSize - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + st, ok := sc.streams[id] + if !ok || st.state != stateOpen { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + return StreamError{id, ErrCodeStreamClosed} + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + data := f.Data() + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.Close(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return StreamError{id, ErrCodeStreamClosed} + } + if len(data) > 0 { + // Check whether the client has flow control quota. + if int(st.inflow.available()) < len(data) { + return StreamError{id, ErrCodeFlowControl} + } + st.inflow.take(int32(len(data))) + wrote, err := st.body.Write(data) + if err != nil { + return StreamError{id, ErrCodeStreamClosed} + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + if f.StreamEnded() { + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.Close(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.Close(io.EOF) + } + st.state = stateHalfClosedRemote + } + return nil +} + +func (sc *serverConn) processHeaders(f *HeadersFrame) error { + sc.serveG.check() + id := f.Header().StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://http2.github.io/http2-spec/#rfc.section.5.1.1 + if id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil { + // Streams initiated by a client MUST use odd-numbered + // stream identifiers. [...] The identifier of a newly + // established stream MUST be numerically greater than all + // streams that the initiating endpoint has opened or + // reserved. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. 
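+		// (The sc.req.stream != nil check above also rejects a
+		// HEADERS frame arriving while another stream's header
+		// block is still open; only CONTINUATION is legal then.)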
+ return ConnectionError(ErrCodeProtocol) + } + if id > sc.maxStreamID { + sc.maxStreamID = id + } + st := &stream{ + id: id, + state: stateOpen, + } + if f.StreamEnded() { + st.state = stateHalfClosedRemote + } + st.cw.Init() + + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings + + sc.streams[id] = st + if f.HasPriority() { + adjustStreamPriority(sc.streams, st.id, f.Priority) + } + sc.curOpenStreams++ + sc.req = requestParam{ + stream: st, + header: make(http.Header), + } + return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) +} + +func (sc *serverConn) processContinuation(f *ContinuationFrame) error { + sc.serveG.check() + st := sc.streams[f.Header().StreamID] + if st == nil || sc.curHeaderStreamID() != st.id { + return ConnectionError(ErrCodeProtocol) + } + return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) +} + +func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { + sc.serveG.check() + if _, err := sc.hpackDecoder.Write(frag); err != nil { + // TODO: convert to stream error I assume? + return err + } + if !end { + return nil + } + if err := sc.hpackDecoder.Close(); err != nil { + // TODO: convert to stream error I assume? + return err + } + defer sc.resetPendingRequest() + if sc.curOpenStreams > sc.advMaxStreams { + // "Endpoints MUST NOT exceed the limit set by their + // peer. An endpoint that receives a HEADERS frame + // that causes their advertised concurrent stream + // limit to be exceeded MUST treat this as a stream + // error (Section 5.4.2) of type PROTOCOL_ERROR or + // REFUSED_STREAM." + if sc.unackedSettings == 0 { + // They should know better. + return StreamError{st.id, ErrCodeProtocol} + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return StreamError{st.id, ErrCodeRefusedStream} + } + + rw, req, err := sc.newWriterAndRequest() + if err != nil { + return err + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + go sc.runHandler(rw, req) + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) + return nil +} + +func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { + st, ok := streams[streamID] + if !ok { + // TODO: not quite correct (this streamID might + // already exist in the dep tree, but be closed), but + // close enough for now. + return + } + st.weight = priority.Weight + parent := streams[priority.StreamDep] // might be nil + if parent == st { + // if client tries to set this stream to be the parent of itself + // ignore and keep going + return + } + + // section 5.3.3: If a stream is made dependent on one of its + // own dependencies, the formerly dependent stream is first + // moved to be dependent on the reprioritized stream's previous + // parent. The moved dependency retains its weight. 
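+	// Walk up from the candidate parent; if st turns out to be one of
+	// its ancestors, splice the parent out by re-pointing it at st's
+	// old parent before adopting it.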
+	for piter := parent; piter != nil; piter = piter.parent {
+		if piter == st {
+			parent.parent = st.parent
+			break
+		}
+	}
+	st.parent = parent
+	if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+		for _, openStream := range streams {
+			if openStream != st && openStream.parent == st.parent {
+				openStream.parent = st
+			}
+		}
+	}
+}
+
+// resetPendingRequest zeros out all state related to a HEADERS frame
+// and its zero or more CONTINUATION frames sent to start a new
+// request.
+func (sc *serverConn) resetPendingRequest() {
+	sc.serveG.check()
+	sc.req = requestParam{}
+}
+
+func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) {
+	sc.serveG.check()
+	rp := &sc.req
+	if rp.invalidHeader || rp.method == "" || rp.path == "" ||
+		(rp.scheme != "https" && rp.scheme != "http") {
+		// See 8.1.2.6 Malformed Requests and Responses:
+		//
+		// "Malformed requests or responses that are detected
+		// MUST be treated as a stream error (Section 5.4.2)
+		// of type PROTOCOL_ERROR."
+		//
+		// 8.1.2.3 Request Pseudo-Header Fields
+		// "All HTTP/2 requests MUST include exactly one valid
+		// value for the :method, :scheme, and :path
+		// pseudo-header fields"
+		return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+	}
+	var tlsState *tls.ConnectionState // nil if not scheme https
+	if rp.scheme == "https" {
+		tlsState = sc.tlsState
+	}
+	authority := rp.authority
+	if authority == "" {
+		authority = rp.header.Get("Host")
+	}
+	needsContinue := rp.header.Get("Expect") == "100-continue"
+	if needsContinue {
+		rp.header.Del("Expect")
+	}
+	bodyOpen := rp.stream.state == stateOpen
+	body := &requestBody{
+		conn:          sc,
+		stream:        rp.stream,
+		needsContinue: needsContinue,
+	}
+	// TODO: handle asterisk '*' requests + test
+	url, err := url.ParseRequestURI(rp.path)
+	if err != nil {
+		// TODO: find the right error code?
+		return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+	}
+	req := &http.Request{
+		Method:     rp.method,
+		URL:        url,
+		RemoteAddr: sc.remoteAddrStr,
+		Header:     rp.header,
+		RequestURI: rp.path,
+		Proto:      "HTTP/2.0",
+		ProtoMajor: 2,
+		ProtoMinor: 0,
+		TLS:        tlsState,
+		Host:       authority,
+		Body:       body,
+	}
+	if bodyOpen {
+		body.pipe = &pipe{
+			b: buffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX
+		}
+		body.pipe.c.L = &body.pipe.m
+
+		if vv, ok := rp.header["Content-Length"]; ok {
+			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+		} else {
+			req.ContentLength = -1
+		}
+	}
+
+	rws := responseWriterStatePool.Get().(*responseWriterState)
+	bwSave := rws.bw
+	*rws = responseWriterState{} // zero all the fields
+	rws.conn = sc
+	rws.bw = bwSave
+	rws.bw.Reset(chunkWriter{rws})
+	rws.stream = rp.stream
+	rws.req = req
+	rws.body = body
+	rws.frameWriteCh = make(chan error, 1)
+
+	rw := &responseWriter{rws: rws}
+	return rw, req, nil
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request) {
+	defer rw.handlerDone()
+	// TODO: catch panics like net/http.Server
+	sc.handler.ServeHTTP(rw, req)
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders, tempCh chan error) {
+	sc.serveG.checkNotOn() // NOT on
+	var errc chan error
+	if headerData.h != nil {
+		// If there's a header map (which we don't own), we have to
+		// block on waiting for this frame to be written, so that an
+		// http.Flush mid-handler writes out the correct value of keys,
+		// before a handler later potentially mutates it.
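+		// (When h is nil, errc stays nil and the frame write
+		// below is fire-and-forget.)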
+ errc = tempCh + } + sc.writeFrameFromHandler(frameWriteMsg{ + write: headerData, + stream: st, + done: errc, + }) + if errc != nil { + select { + case <-errc: + // Ignore. Just for synchronization. + // Any error will be handled in the writing goroutine. + case <-sc.doneServing: + // Client has closed the connection. + } + } +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(frameWriteMsg{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { + sc.serveG.checkNotOn() // NOT on + sc.bodyReadCh <- bodyReadMsg{st, n} +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(frameWriteMsg{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +type requestBody struct { + stream *stream + conn *serverConn + closed bool + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil { + b.pipe.Close(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if n > 0 { + b.conn.noteBodyReadFromHandler(b.stream, n) + } + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. 
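+// (The reuse goes through responseWriterStatePool: handlerDone zeroes
+// w.rws and returns the state to the pool for the next request.)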
+type responseWriter struct {
+	rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+	_ http.CloseNotifier = (*responseWriter)(nil)
+	_ http.Flusher       = (*responseWriter)(nil)
+	_ stringWriter       = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+	// immutable within a request:
+	stream *stream
+	req    *http.Request
+	body   *requestBody // to close at end of request, if DATA frames didn't
+	conn   *serverConn
+
+	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+	// mutated by http.Handler goroutine:
+	handlerHeader http.Header // nil until called
+	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
+	status        int         // status code passed to WriteHeader
+	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+	sentHeader    bool        // have we sent the header frame?
+	handlerDone   bool        // handler has finished
+	curWrite      writeData
+	frameWriteCh  chan error // re-used whenever we need to block on a frame being written
+
+	closeNotifierMu sync.Mutex // guards closeNotifierCh
+	closeNotifierCh chan bool  // nil until first used
+}
+
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADERS response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+	if !rws.wroteHeader {
+		rws.writeHeader(200)
+	}
+	if !rws.sentHeader {
+		rws.sentHeader = true
+		var ctype, clen string // implicit ones, if we can calculate them
+		if rws.handlerDone && rws.snapHeader.Get("Content-Length") == "" {
+			clen = strconv.Itoa(len(p))
+		}
+		if rws.snapHeader.Get("Content-Type") == "" {
+			ctype = http.DetectContentType(p)
+		}
+		endStream := rws.handlerDone && len(p) == 0
+		rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+			streamID:      rws.stream.id,
+			httpResCode:   rws.status,
+			h:             rws.snapHeader,
+			endStream:     endStream,
+			contentType:   ctype,
+			contentLength: clen,
+		}, rws.frameWriteCh)
+		if endStream {
+			return 0, nil
+		}
+	}
+	if len(p) == 0 && !rws.handlerDone {
+		return 0, nil
+	}
+	curWrite := &rws.curWrite
+	curWrite.streamID = rws.stream.id
+	curWrite.p = p
+	curWrite.endStream = rws.handlerDone
+	if err := rws.conn.writeDataFromHandler(rws.stream, curWrite, rws.frameWriteCh); err != nil {
+		return 0, err
+	}
+	return len(p), nil
+}
+
+func (w *responseWriter) Flush() {
+	rws := w.rws
+	if rws == nil {
+		panic("Flush called after Handler finished")
+	}
+	if rws.bw.Buffered() > 0 {
+		if err := rws.bw.Flush(); err != nil {
+			// Ignore the error. The frame writer already knows.
+			return
+		}
+	} else {
+		// The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk) with zero bytes, so we have to do it
+		// ourselves to force the HTTP response header and/or
+		// final DATA frame (with END_STREAM) to be sent.
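+		// A nil chunk reaches writeChunk with len(p) == 0, which forces
+		// the HEADERS frame out if it hasn't been sent yet and, once the
+		// handler is done, sends the final empty DATA frame with
+		// END_STREAM.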
+		rws.writeChunk(nil)
+	}
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+	rws := w.rws
+	if rws == nil {
+		panic("CloseNotify called after Handler finished")
+	}
+	rws.closeNotifierMu.Lock()
+	ch := rws.closeNotifierCh
+	if ch == nil {
+		ch = make(chan bool, 1)
+		rws.closeNotifierCh = ch
+		go func() {
+			rws.stream.cw.Wait() // wait for close
+			ch <- true
+		}()
+	}
+	rws.closeNotifierMu.Unlock()
+	return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.handlerHeader == nil {
+		rws.handlerHeader = make(http.Header)
+	}
+	return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+	rws := w.rws
+	if rws == nil {
+		panic("WriteHeader called after Handler finished")
+	}
+	rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+	if !rws.wroteHeader {
+		rws.wroteHeader = true
+		rws.status = code
+		if len(rws.handlerHeader) > 0 {
+			rws.snapHeader = cloneHeader(rws.handlerHeader)
+		}
+	}
+}
+
+func cloneHeader(h http.Header) http.Header {
+	h2 := make(http.Header, len(h))
+	for k, vv := range h {
+		vv2 := make([]string, len(vv))
+		copy(vv2, vv)
+		h2[k] = vv2
+	}
+	return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+	return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+	return w.write(len(s), nil, s)
+}
+
+// either dataB or dataS is non-zero.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+	rws := w.rws
+	if rws == nil {
+		panic("Write called after Handler finished")
+	}
+	if !rws.wroteHeader {
+		w.WriteHeader(200)
+	}
+	if dataB != nil {
+		return rws.bw.Write(dataB)
+	} else {
+		return rws.bw.WriteString(dataS)
+	}
+}
+
+func (w *responseWriter) handlerDone() {
+	rws := w.rws
+	if rws == nil {
+		panic("handlerDone called twice")
+	}
+	rws.handlerDone = true
+	w.Flush()
+	w.rws = nil
+	responseWriterStatePool.Put(rws)
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go
new file mode 100644
index 000000000..1133223b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go
@@ -0,0 +1,2252 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +package http2 + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/bradfitz/http2/hpack" +) + +var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") + +type serverTester struct { + cc net.Conn // client conn + t testing.TB + ts *httptest.Server + fr *Framer + logBuf *bytes.Buffer + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc + sc *serverConn + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder + + // reading frames: + frc chan Frame + frErrc chan error + readTimer *time.Timer +} + +func init() { + testHookOnPanicMu = new(sync.Mutex) +} + +func resetHooks() { + testHookOnPanicMu.Lock() + testHookOnPanic = nil + testHookOnPanicMu.Unlock() +} + +type serverTesterOpt string + +var optOnlyServer = serverTesterOpt("only_server") + +func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { + resetHooks() + + logBuf := new(bytes.Buffer) + ts := httptest.NewUnstartedServer(handler) + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + // The h2-14 is temporary, until curl is updated. (as used by unit tests + // in Docker) + NextProtos: []string{NextProtoTLS, "h2-14"}, + } + + onlyServer := false + for _, opt := range opts { + switch v := opt.(type) { + case func(*tls.Config): + v(tlsConfig) + case func(*httptest.Server): + v(ts) + case serverTesterOpt: + onlyServer = (v == optOnlyServer) + default: + t.Fatalf("unknown newServerTester option type %T", v) + } + } + + ConfigureServer(ts.Config, &Server{}) + + st := &serverTester{ + t: t, + ts: ts, + logBuf: logBuf, + frc: make(chan Frame, 1), + frErrc: make(chan error, 1), + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + + var stderrv io.Writer = ioutil.Discard + if *stderrVerbose { + stderrv = os.Stderr + } + + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv, twriter{t: t, st: st}, logBuf), "", log.LstdFlags) + ts.StartTLS() + + if VerboseLogs { + t.Logf("Running test server at: %s", ts.URL) + } + testHookGetServerConn = func(v *serverConn) { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc = v + st.sc.testHookCh = make(chan func()) + } + log.SetOutput(io.MultiWriter(stderrv, twriter{t: t, st: st})) + if !onlyServer { + cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + st.cc = cc + st.fr = NewFramer(cc, cc) + } + + return st +} + +func (st *serverTester) closeConn() { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc.conn.Close() +} + +func (st *serverTester) addLogFilter(phrase string) { + st.logFilter = append(st.logFilter, phrase) +} + +func (st *serverTester) stream(id uint32) *stream { + ch := make(chan *stream, 1) + st.sc.testHookCh <- func() { + ch <- st.sc.streams[id] + } + return <-ch +} + +func (st *serverTester) streamState(id uint32) streamState { + ch := make(chan streamState, 1) + st.sc.testHookCh <- func() { + state, _ := st.sc.state(id) + ch <- state + } + return <-ch +} + +func (st *serverTester) Close() { + st.ts.Close() + if st.cc != nil { + st.cc.Close() + } + 
log.SetOutput(os.Stderr)
+}
+
+// greet initiates the client's HTTP/2 connection into a state where
+// frames may be sent.
+func (st *serverTester) greet() {
+	st.writePreface()
+	st.writeInitialSettings()
+	st.wantSettings()
+	st.writeSettingsAck()
+	st.wantSettingsAck()
+}
+
+func (st *serverTester) writePreface() {
+	n, err := st.cc.Write(clientPreface)
+	if err != nil {
+		st.t.Fatalf("Error writing client preface: %v", err)
+	}
+	if n != len(clientPreface) {
+		st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface))
+	}
+}
+
+func (st *serverTester) writeInitialSettings() {
+	if err := st.fr.WriteSettings(); err != nil {
+		st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
+	}
+}
+
+func (st *serverTester) writeSettingsAck() {
+	if err := st.fr.WriteSettingsAck(); err != nil {
+		st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
+	}
+}
+
+func (st *serverTester) writeHeaders(p HeadersFrameParam) {
+	if err := st.fr.WriteHeaders(p); err != nil {
+		st.t.Fatalf("Error writing HEADERS: %v", err)
+	}
+}
+
+func (st *serverTester) encodeHeaderField(k, v string) {
+	err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+	if err != nil {
+		st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+	}
+}
+
+// encodeHeader encodes headers and returns their HPACK bytes. headers
+// must contain an even number of key/value pairs. There may be
+// multiple pairs for keys (e.g. "cookie"). The :method, :path, and
+// :scheme headers default to GET, / and https.
+func (st *serverTester) encodeHeader(headers ...string) []byte {
+	if len(headers)%2 == 1 {
+		panic("odd number of kv args")
+	}
+
+	st.headerBuf.Reset()
+
+	if len(headers) == 0 {
+		// Fast path, mostly for benchmarks, so test code doesn't pollute
+		// profiles when we're looking to improve server allocations.
+		st.encodeHeaderField(":method", "GET")
+		st.encodeHeaderField(":path", "/")
+		st.encodeHeaderField(":scheme", "https")
+		return st.headerBuf.Bytes()
+	}
+
+	if len(headers) == 2 && headers[0] == ":method" {
+		// Another fast path for benchmarks.
+		st.encodeHeaderField(":method", headers[1])
+		st.encodeHeaderField(":path", "/")
+		st.encodeHeaderField(":scheme", "https")
+		return st.headerBuf.Bytes()
+	}
+
+	pseudoCount := map[string]int{}
+	keys := []string{":method", ":path", ":scheme"}
+	vals := map[string][]string{
+		":method": {"GET"},
+		":path":   {"/"},
+		":scheme": {"https"},
+	}
+	for len(headers) > 0 {
+		k, v := headers[0], headers[1]
+		headers = headers[2:]
+		if _, ok := vals[k]; !ok {
+			keys = append(keys, k)
+		}
+		if strings.HasPrefix(k, ":") {
+			pseudoCount[k]++
+			if pseudoCount[k] == 1 {
+				vals[k] = []string{v}
+			} else {
+				// Allows testing of invalid headers w/ dup pseudo fields.
+				vals[k] = append(vals[k], v)
+			}
+		} else {
+			vals[k] = append(vals[k], v)
+		}
+	}
+	st.headerBuf.Reset()
+	for _, k := range keys {
+		for _, v := range vals[k] {
+			st.encodeHeaderField(k, v)
+		}
+	}
+	return st.headerBuf.Bytes()
+}
+
+// bodylessReq1 writes a HEADERS frame with StreamID 1 and EndStream and EndHeaders set.
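+// For example, st.bodylessReq1(":authority", "example.com") sends a
+// complete GET / request for example.com with an empty body.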
+func (st *serverTester) bodylessReq1(headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) readFrame() (Frame, error) { + go func() { + fr, err := st.fr.ReadFrame() + if err != nil { + st.frErrc <- err + } else { + st.frc <- fr + } + }() + t := st.readTimer + if t == nil { + t = time.NewTimer(2 * time.Second) + st.readTimer = t + } + t.Reset(2 * time.Second) + defer t.Stop() + select { + case f := <-st.frc: + return f, nil + case err := <-st.frErrc: + return nil, err + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +func (st *serverTester) wantHeaders() *HeadersFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) + } + hf, ok := f.(*HeadersFrame) + if !ok { + st.t.Fatalf("got a %T; want *HeadersFrame", f) + } + return hf +} + +func (st *serverTester) wantContinuation() *ContinuationFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) + } + cf, ok := f.(*ContinuationFrame) + if !ok { + st.t.Fatalf("got a %T; want *ContinuationFrame", f) + } + return cf +} + +func (st *serverTester) wantData() *DataFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a DATA frame: %v", err) + } + df, ok := f.(*DataFrame) + if !ok { + st.t.Fatalf("got a %T; want *DataFrame", f) + } + return df +} + +func (st *serverTester) wantSettings() *SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantPing() *PingFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a PING frame: %v", err) + } + pf, ok := f.(*PingFrame) + if !ok { + st.t.Fatalf("got a %T; want *PingFrame", f) + } + return pf +} + +func (st *serverTester) wantGoAway() *GoAwayFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) + } + gf, ok := f.(*GoAwayFrame) + if !ok { + st.t.Fatalf("got a %T; want *GoAwayFrame", f) + } + return gf +} + +func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) + } + rs, ok := f.(*RSTStreamFrame) + if !ok { + st.t.Fatalf("got a %T; want *RSTStreamFrame", f) + } + if rs.FrameHeader.StreamID != streamID { + st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) + } + if rs.ErrCode != errCode { + st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) + } +} + +func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) + } + wu, ok := f.(*WindowUpdateFrame) + if !ok { + st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) + } + if wu.FrameHeader.StreamID != streamID { + st.t.Fatalf("WindowUpdate StreamID = %d; want %d", 
wu.FrameHeader.StreamID, streamID) + } + if wu.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) + } +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + +} + +func TestServer(t *testing.T) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + gotReq <- true + }) + defer st.Close() + + covers("3.5", ` + The server connection preface consists of a potentially empty + SETTINGS frame ([SETTINGS]) that MUST be the first frame the + server sends in the HTTP/2 connection. + `) + + st.writePreface() + st.writeInitialSettings() + st.wantSettings() + st.writeSettingsAck() + st.wantSettingsAck() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func TestServer_Request_Get(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("foo-bar", "some-value"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "GET" { + t.Errorf("Method = %q; want GET", r.Method) + } + if r.URL.Path != "/" { + t.Errorf("URL.Path = %q; want /", r.URL.Path) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if r.Close { + t.Error("Close = true; want false") + } + if !strings.Contains(r.RemoteAddr, ":") { + t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) + } + if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { + t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) + } + wantHeader := http.Header{ + "Foo-Bar": []string{"some-value"}, + } + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Get_PathSlashes(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":path", "/%2f/"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.RequestURI != "/%2f/" { + t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) + } + if r.URL.Path != "///" { + t.Errorf("URL.Path = %q; want ///", r.URL.Path) + } + }) +} + +// TODO: add a test with EndStream=true on the HEADERS but setting a +// Content-Length anyway. Should we just omit it and force it to +// zero? 
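+//
+// (With EndStream=true on the HEADERS frame the client promises to send
+// no DATA frames, so the body is empty and ContentLength stays 0; the
+// POST tests below cover both that case and bodies split across DATA
+// frames.)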
+ +func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { + testBodyContents(t, -1, "", func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, nil) // just kidding. empty body. + }) +} + +func TestServer_Request_Post_Body_OneData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_TwoData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, false, []byte(content[:5])) + st.writeData(1, true, []byte(content[5:])) + }) +} + +func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { + const content = "Some content" + testBodyContents(t, int64(len(content)), content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", strconv.Itoa(len(content)), + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { + testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "3", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12")) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { + testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "4", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12345")) + }) +} + +func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if 
r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(all) != wantBody { + t.Errorf("Read = %q; want %q", all, wantBody) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err == nil { + t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.", + wantReadError, all) + } + if !strings.Contains(err.Error(), wantReadError) { + t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +// Using a Host header, instead of :authority +func TestServer_Request_Get_Host(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("host", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +// Using an :authority pseudo-header, instead of Host +func TestServer_Request_Get_Authority(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +func TestServer_Request_WithContinuation(t *testing.T) { + wantHeader := http.Header{ + "Foo-One": []string{"value-one"}, + "Foo-Two": []string{"value-two"}, + "Foo-Three": []string{"value-three"}, + } + testServerRequest(t, func(st *serverTester) { + fullHeaders := st.encodeHeader( + "foo-one", "value-one", + "foo-two", "value-two", + "foo-three", "value-three", + ) + remain := fullHeaders + chunks := 0 + for len(remain) > 0 { + const maxChunkSize = 5 + chunk := remain + if len(chunk) > maxChunkSize { + chunk = chunk[:maxChunkSize] + } + remain = remain[len(chunk):] + + if chunks == 0 { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: chunk, + EndStream: true, // no DATA frames + EndHeaders: false, // we'll have continuation frames + }) + } else { + err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) + if err != nil { + t.Fatal(err) + } + } + chunks++ + } + if chunks < 2 { + t.Fatal("too few chunks") + } + }, func(r *http.Request) { + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + }) +} + +// Concatenated cookie headers. 
("8.1.2.5 Compressing the Cookie Header Field") +func TestServer_Request_CookieConcat(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.bodylessReq1( + ":authority", host, + "cookie", "a=b", + "cookie", "c=d", + "cookie", "e=f", + ) + }, func(r *http.Request) { + const want = "a=b; c=d; e=f" + if got := r.Header.Get("Cookie"); got != want { + t.Errorf("Cookie = %q; want %q", got, want) + } + }) +} + +func TestServer_Request_Reject_CapitalHeader(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) +} + +func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid value" ... + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("duplicate pseudo-header") + st.bodylessReq1(":method", "GET", ":method", "POST") + }) +} + +func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All pseudo-header fields MUST appear in the header block + // before regular header fields. Any request or response that + // contains a pseudo-header field that appears in a header + // block after a regular header field MUST be treated as + // malformed (Section 8.1.2.6)." + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("pseudo-header after regular header") + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) + enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) + enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) + enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) +} + +func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) +} + +func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) + st.bodylessReq1(":unknown_thing", "") + }) +} + +func testRejectRequest(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Fatal("server request made it to handler; should've been rejected") + }) + defer st.Close() + + st.greet() + send(st) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func TestServer_Ping(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + + // Server should ignore this one, since it has ACK set. + ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128} + if err := st.fr.WritePing(true, ackPingData); err != nil { + t.Fatal(err) + } + + // But the server should reply to this one, since ACK is false. 
+	pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+	if err := st.fr.WritePing(false, pingData); err != nil {
+		t.Fatal(err)
+	}
+
+	pf := st.wantPing()
+	if !pf.Flags.Has(FlagPingAck) {
+		t.Error("response ping doesn't have ACK set")
+	}
+	if pf.Data != pingData {
+		t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
+	}
+}
+
+func TestServer_RejectsLargeFrames(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Write a frame that is too large (too large by one byte).
+	// We ignore the return value because it's expected that the server
+	// will only read the first 9 bytes (the header) and then disconnect.
+	st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
+
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFrameSize {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
+	}
+}
+
+func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
+	puppet := newHandlerPuppet()
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		puppet.act(w, r)
+	})
+	defer st.Close()
+	defer puppet.done()
+
+	st.greet()
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // data coming
+		EndHeaders:    true,
+	})
+	st.writeData(1, false, []byte("abcdef"))
+	puppet.do(readBodyHandler(t, "abc"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+
+	puppet.do(readBodyHandler(t, "def"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+
+	st.writeData(1, true, []byte("ghijkl")) // END_STREAM here
+	puppet.do(readBodyHandler(t, "ghi"))
+	puppet.do(readBodyHandler(t, "jkl"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM
+}
+
+func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+	if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
+		t.Fatal(err)
+	}
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFlowControl {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
+	}
+	if gf.LastStreamID != 0 {
+		t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
+	}
+}
+
+func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
+	inHandler := make(chan bool)
+	blockHandler := make(chan bool)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		<-blockHandler
+	})
+	defer st.Close()
+	defer close(blockHandler)
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	// Send a bogus window update:
+	if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {
+		t.Fatal(err)
+	}
+	st.wantRSTStream(1, ErrCodeFlowControl)
+}
+
+// testServerPostUnblock sends a hanging POST with unsent data to the
+// handler, then runs fn once in the handler, and verifies that the error
+// returned from the handler is acceptable. It fails if it takes over 5
+// seconds for the handler to exit.
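+// otherHeaders, if given, are appended to the POST's header block as
+// additional key/value pairs.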
+func testServerPostUnblock(t *testing.T, + handler func(http.ResponseWriter, *http.Request) error, + fn func(*serverTester), + checkErr func(error), + otherHeaders ...string) { + inHandler := make(chan bool) + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + errc <- handler(w, r) + }) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + fn(st) + select { + case err := <-errc: + if checkErr != nil { + checkErr(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Handler to return") + } + st.Close() +} + +func TestServer_RSTStream_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, + func(err error) { + if err == nil { + t.Error("unexpected nil error from Request.Body.Read") + } + }, + ) +} + +func TestServer_DeadConn_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { st.cc.Close() }, + func(err error) { + if err == nil { + t.Error("unexpected nil error from Request.Body.Read") + } + }, + ) +} + +var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { + <-w.(http.CloseNotifier).CloseNotify() + return nil +} + +func TestServer_CloseNotify_After_RSTStream(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, nil) +} + +func TestServer_CloseNotify_After_ConnClose(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) +} + +// that CloseNotify unblocks after a stream error due to the client's +// problem that's unrelated to them explicitly canceling it (which is +// TestServer_CloseNotify_After_RSTStream above) +func TestServer_CloseNotify_After_StreamError(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + // data longer than declared Content-Length => stream error + st.writeData(1, true, []byte("1234")) + }, nil, "content-length", "3") +} + +func TestServer_StateTransitions(t *testing.T) { + var st *serverTester + inHandler := make(chan bool) + writeData := make(chan bool) + leaveHandler := make(chan bool) + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + if st.stream(1) == nil { + t.Errorf("nil stream 1 in handler") + } + if got, want := st.streamState(1), stateOpen; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + writeData <- true + if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { + t.Errorf("body read = %d, %v; want 0, EOF", n, err) + } + if got, want := st.streamState(1), stateHalfClosedRemote; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + + <-leaveHandler + }) + st.greet() + if st.stream(1) != nil { + t.Fatal("stream 1 should be empty") + } + if got := st.streamState(1); got != stateIdle { + t.Fatalf("stream 1 should be idle; got %v", got) + } + + st.writeHeaders(HeadersFrameParam{ + 
StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + <-writeData + st.writeData(1, true, nil) + + leaveHandler <- true + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("expected END_STREAM flag") + } + + if got, want := st.streamState(1), stateClosed; got != want { + t.Errorf("at end, state is %v; want %v", got, want) + } + if st.stream(1) != nil { + t.Fatal("at end, stream 1 should be gone") + } +} + +// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { + testServerRejects(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + st.writeHeaders(HeadersFrameParam{ // Not a continuation. + StreamID: 3, // different stream. + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// test HEADERS w/o EndHeaders + PING (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { + testServerRejects(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WritePing(false, [8]byte{}); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) +func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { + testServerRejects(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID +func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { + testServerRejects(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// No HEADERS on stream 0. +func TestServer_Rejects_Headers0(t *testing.T) { + testServerRejects(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 0, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// No CONTINUATION on stream 0. +func TestServer_Rejects_Continuation0(t *testing.T) { + testServerRejects(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Rejects_PushPromise(t *testing.T) { + testServerRejects(t, func(st *serverTester) { + pp := PushPromiseParam{ + StreamID: 1, + PromiseID: 3, + } + if err := st.fr.WritePushPromise(pp); err != nil { + t.Fatal(err) + } + }) +} + +// testServerRejects tests that the server hangs up with a GOAWAY +// frame and a server close after the client does something +// deserving a CONNECTION_ERROR. 
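+// (Per section 5.4.1, after a connection error the endpoint sends a
+// GOAWAY and then closes the TCP connection, so the helper expects the
+// next read to return io.EOF.)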
+func testServerRejects(t *testing.T, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + st.addLogFilter("connection error: PROTOCOL_ERROR") + defer st.Close() + st.greet() + writeReq(st) + + st.wantGoAway() + errc := make(chan error, 1) + go func() { + fr, err := st.fr.ReadFrame() + if err == nil { + err = fmt.Errorf("got frame of type %T", fr) + } + errc <- err + }() + select { + case err := <-errc: + if err != io.EOF { + t.Errorf("ReadFrame = %v; want io.EOF", err) + } + case <-time.After(2 * time.Second): + t.Error("timeout waiting for disconnect") + } +} + +// testServerRequest sets up an idle HTTP/2 connection and lets you +// write a single request with writeReq, and then verify that the +// *http.Request is built correctly in checkReq. +func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + checkReq(r) + gotReq <- true + }) + defer st.Close() + + st.greet() + writeReq(st) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func getSlash(st *serverTester) { st.bodylessReq1() } + +func TestServer_Response_NoData(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // Nothing. + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + }) +} + +func TestServer_Response_NoData_Header_FooBar(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Foo-Bar", "some-value") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo-bar", "some-value"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { + const msg = "this is HTML." 
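+	// (Sniffing happens in writeChunk via http.DetectContentType on the
+	// first chunk; the explicit Content-Type set below suppresses it.)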
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Content-Type", "foo/bar") + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "foo/bar"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_TransferEncoding_chunked(t *testing.T) { + const msg = "hi" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Transfer-Encoding", "chunked") // should be stripped + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed only after the initial write. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed before the initial write and later mutated. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("foo", "proper value") + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "proper value"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_SniffLenType(t *testing.T) { + const msg = "this is HTML." 
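+	// (Both headers are implicit here: writeChunk sniffs Content-Type
+	// from the first chunk and, because the handler returns before any
+	// flush, sets Content-Length from len(p).)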
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_Header_Flush_MidWrite(t *testing.T) { + const msg = "this is HTML" + const msg2 = ", and this is the next chunk" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg2) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + { + df := st.wantData() + if df.StreamEnded() { + t.Error("unexpected END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + } + { + df := st.wantData() + if !df.StreamEnded() { + t.Error("wanted END_STREAM flag on last data chunk") + } + if got := string(df.Data()); got != msg2 { + t.Errorf("got DATA %q; want %q", got, msg2) + } + } + }) +} + +func TestServer_Response_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + // Give the handler quota to write: + if err := st.fr.WriteWindowUpdate(1, size); err != nil { + t.Fatal(err) + } + // Give the handler quota to write to connection-level + // window as well + if err := st.fr.WriteWindowUpdate(0, size); err != nil { + t.Fatal(err) + } + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + var bytes, frames int + for { + df := st.wantData() + bytes += len(df.Data()) + frames++ + for _, b := range df.Data() { + if b != 'a' { + t.Fatal("non-'a' byte seen in 
DATA") + } + } + if df.StreamEnded() { + break + } + } + if bytes != size { + t.Errorf("Got %d bytes; want %d", bytes, size) + } + if want := int(size / maxFrameSize); frames < want || frames > want*2 { + t.Errorf("Got %d frames; want %d", frames, size) + } + }) +} + +// Test that the handler can't write more than the client allows +func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + // Set the window size to something explicit for this test. + // It's also how much initial data we expect. + const initWindowSize = 123 + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, initWindowSize}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + defer func() { st.fr.WriteRSTStream(1, ErrCodeCancel) }() + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != initWindowSize { + t.Fatalf("Initial window size = %d but got DATA with %d bytes", initWindowSize, got) + } + + for _, quota := range []int{1, 13, 127} { + if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { + t.Fatal(err) + } + df := st.wantData() + if int(quota) != len(df.Data()) { + t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) + } + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM. 
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + errc := make(chan error, 1) + go func() { + _, err := w.Write(bytes.Repeat([]byte("a"), size)) + errc <- err + }() + select { + case err := <-errc: + if err == nil { + return errors.New("unexpected nil error from Write in handler") + } + return nil + case <-time.After(2 * time.Second): + return errors.New("timeout waiting for Write in handler") + } + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + // Nothing; send empty DATA + return nil + }, func(st *serverTester) { + // Handler gets no data quota: + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != 0 { + t.Fatalf("unexpected %d DATA bytes; want 0", got) + } + if !df.StreamEnded() { + t.Fatal("DATA didn't have END_STREAM") + } + }) +} + +func TestServer_Response_Automatic100Continue(t *testing.T) { + const msg = "foo" + const reply = "bar" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + if v := r.Header.Get("Expect"); v != "" { + t.Errorf("Expect header = %q; want empty", v) + } + buf := make([]byte, len(msg)) + // This read should trigger the 100-continue being sent. + if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { + return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) + } + _, err := io.WriteString(w, reply) + return err + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := decodeHeader(t, hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "100"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Fatalf("Got headers %v; want %v", goth, wanth) + } + + // Okay, they sent status 100, so we can send our + // gigantic and/or sensitive "foo" payload now. 
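+		// (The 100 response was triggered by the handler's first
+		// Body.Read: requestBody.Read sends the 100-continue HEADERS
+		// before reading when needsContinue is set.)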
+ st.writeData(1, true, []byte(msg)) + + st.wantWindowUpdate(0, uint32(len(msg))) + + hf = st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("expected data to follow") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth = decodeHeader(t, hf.HeaderBlockFragment()) + wanth = [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(reply))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + + df := st.wantData() + if string(df.Data()) != reply { + t.Errorf("Client read %q; want %q", df.Data(), reply) + } + if !df.StreamEnded() { + t.Errorf("expect data stream end") + } + }) +} + +func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { + errc := make(chan error, 1) + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + p := []byte("some data.\n") + for { + _, err := w.Write(p) + if err != nil { + errc <- err + return nil + } + } + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + // Close the connection and wait for the handler to (hopefully) notice. + st.cc.Close() + select { + case <-errc: + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_Too_Many_Streams(t *testing.T) { + const testPath = "/some/path" + + inHandler := make(chan uint32) + leaveHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + id := w.(*responseWriter).rws.stream.id + inHandler <- id + if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { + t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) + } + <-leaveHandler + }) + defer st.Close() + st.greet() + nextStreamID := uint32(1) + streamID := func() uint32 { + defer func() { nextStreamID += 2 }() + return nextStreamID + } + sendReq := func(id uint32, headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) + } + for i := 0; i < defaultMaxStreams; i++ { + sendReq(streamID()) + <-inHandler + } + defer func() { + for i := 0; i < defaultMaxStreams; i++ { + leaveHandler <- true + } + }() + + // And this one should cross the limit: + // (It's also sent as a CONTINUATION, to verify we still track the decoder context, + // even if we're rejecting it) + rejectID := streamID() + headerBlock := st.encodeHeader(":path", testPath) + frag1, frag2 := headerBlock[:3], headerBlock[3:] + st.writeHeaders(HeadersFrameParam{ + StreamID: rejectID, + BlockFragment: frag1, + EndStream: true, + EndHeaders: false, // CONTINUATION coming + }) + if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { + t.Fatal(err) + } + st.wantRSTStream(rejectID, ErrCodeProtocol) + + // But let a handler finish: + leaveHandler <- true + st.wantHeaders() + + // And now another stream should be able to start: + goodID := streamID() + sendReq(goodID, ":path", testPath) + select { + case got := <-inHandler: + if got != goodID { + t.Errorf("Got stream %d; want %d", got, goodID) + } + case <-time.After(3 * time.Second): + t.Error("timeout waiting for handler") + } +} + +// So many response headers that the server needs to use CONTINUATION frames: +func 
TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + h := w.Header() + for i := 0; i < 5000; i++ { + h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i)) + } + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.HeadersEnded() { + t.Fatal("got unwanted END_HEADERS flag") + } + n := 0 + for { + n++ + cf := st.wantContinuation() + if cf.HeadersEnded() { + break + } + } + if n < 5 { + t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n) + } + }) +} + +// This previously crashed (reported by Mathieu Lonjaret as observed +// while using Camlistore) because we got a DATA frame from the client +// after the handler exited and our logic at the time was wrong, +// keeping a stream in the map in stateClosed, which tickled an +// invariant check later when we tried to remove that stream (via +// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop +// ended. +func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // nothing + return nil + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, // DATA is coming + EndHeaders: true, + }) + hf := st.wantHeaders() + if !hf.HeadersEnded() || !hf.StreamEnded() { + t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) + } + + // Sent when a Handler closes while a client has + // indicated it's still sending DATA: + st.wantRSTStream(1, ErrCodeCancel) + + // Now the handler has ended, so it's ended its + // stream, but the client hasn't closed its side + // (stateClosedLocal). So send more data and verify + // it doesn't crash with an internal invariant panic, like + // it did before. + st.writeData(1, true, []byte("foo")) + + // Sent after a peer sends data anyway (admittedly the + // previous RST_STREAM might've still been in-flight), + // but they'll get the more friendly 'cancel' code + // first. + st.wantRSTStream(1, ErrCodeStreamClosed) + + // Set up a bunch of machinery to record the panic we saw + // previously. + var ( + panMu sync.Mutex + panicVal interface{} + ) + + testHookOnPanicMu.Lock() + testHookOnPanic = func(sc *serverConn, pv interface{}) bool { + panMu.Lock() + panicVal = pv + panMu.Unlock() + return true + } + testHookOnPanicMu.Unlock() + + // Now force the serve loop to end, via closing the connection. + st.cc.Close() + select { + case <-st.sc.doneServing: + // Loop has exited.
+ panMu.Lock() + got := panicVal + panMu.Unlock() + if got != nil { + t.Errorf("Got panic: %v", got) + } + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } +func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } + +func testRejectTLS(t *testing.T, max uint16) { + st := newServerTester(t, nil, func(c *tls.Config) { + c.MaxVersion = max + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Rejects_TLSBadCipher(t *testing.T) { + st := newServerTester(t, nil, func(c *tls.Config) { + // Only list bad ones: + c.CipherSuites = []uint16{ + tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + } + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Advertises_Common_Cipher(t *testing.T) { + const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + st := newServerTester(t, nil, func(c *tls.Config) { + // Have the client only support the one required by the spec. + c.CipherSuites = []uint16{requiredSuite} + }, func(ts *httptest.Server) { + var srv *http.Server = ts.Config + // Have the server configured with one specific cipher suite + // which is banned. This tests that ConfigureServer ends up + // adding the good one to this list. + srv.TLSConfig = &tls.Config{ + CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, // just a banned one + } + }) + defer st.Close() + st.greet() +} + +// TODO: move this onto *serverTester, and re-use the same hpack +// decoding context throughout. We're just getting lucky here with +// creating a new decoder each time. 
+func decodeHeader(t *testing.T, headerBlock []byte) (pairs [][2]string) { + d := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { + pairs = append(pairs, [2]string{f.Name, f.Value}) + }) + if _, err := d.Write(headerBlock); err != nil { + t.Fatalf("hpack decoding error: %v", err) + } + if err := d.Close(); err != nil { + t.Fatalf("hpack decoding error: %v", err) + } + return +} + +// testServerResponse sets up an idle HTTP/2 connection. The client func +// drives the connection through the serverTester (writing a single request +// and verifying the frames that come back), the handler func serves that +// request, and a non-nil error returned by the handler fails the test. +func testServerResponse(t testing.TB, + handler func(http.ResponseWriter, *http.Request) error, + client func(*serverTester), +) { + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + errc <- handler(w, r) + }) + defer st.Close() + + donec := make(chan bool) + go func() { + defer close(donec) + st.greet() + client(st) + }() + + select { + case <-donec: + return + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } + + select { + case err := <-errc: + if err != nil { + t.Fatalf("Error in handler: %v", err) + } + case <-time.After(2 * time.Second): + t.Error("timeout waiting for handler to finish") + } +} + +// readBodyHandler returns an http Handler func that reads len(want) +// bytes from r.Body and fails t if the contents read were not +// the value of want. +func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + buf := make([]byte, len(want)) + _, err := io.ReadFull(r.Body, buf) + if err != nil { + t.Error(err) + return + } + if string(buf) != want { + t.Errorf("read %q; want %q", buf, want) + } + } +} + +// TestServerWithCurl currently fails, hence the LenientCipherSuites test.
See: +// https://github.com/tatsuhiro-t/nghttp2/issues/140 & +// http://sourceforge.net/p/curl/bugs/1472/ +func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } +func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } + +func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + requireCurl(t) + const msg = "Hello from curl!\n" + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + w.Header().Set("Client-Proto", r.Proto) + io.WriteString(w, msg) + })) + ConfigureServer(ts.Config, &Server{ + PermitProhibitedCipherSuites: permitProhibitedCipherSuites, + }) + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + ts.StartTLS() + defer ts.Close() + + var gotConn int32 + testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } + + t.Logf("Running test server for curl to hit at: %s", ts.URL) + container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) + defer kill(container) + resc := make(chan interface{}, 1) + go func() { + res, err := dockerLogs(container) + if err != nil { + resc <- err + } else { + resc <- res + } + }() + select { + case res := <-resc: + if err, ok := res.(error); ok { + t.Fatal(err) + } + if !strings.Contains(string(res.([]byte)), "foo: Bar") { + t.Errorf("didn't see foo: Bar header") + t.Logf("Got: %s", res) + } + if !strings.Contains(string(res.([]byte)), "client-proto: HTTP/2") { + t.Errorf("didn't see client-proto: HTTP/2 header") + t.Logf("Got: %s", res) + } + if !strings.Contains(string(res.([]byte)), msg) { + t.Errorf("didn't see %q content", msg) + t.Logf("Got: %s", res) + } + case <-time.After(3 * time.Second): + t.Errorf("timeout waiting for curl") + } + + if atomic.LoadInt32(&gotConn) == 0 { + t.Error("never saw an http2 connection") + } +} + +func BenchmarkServerGets(b *testing.B) { + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} + +func BenchmarkServerPosts(b *testing.B) { + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply.
(plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(id, true, nil) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml b/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml new file mode 100644 index 000000000..31a84bed4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml @@ -0,0 +1,5021 @@ + + + + + + + + + + + + + + + + + + + Hypertext Transfer Protocol version 2 + + + Twist +
+ mbelshe@chromium.org +
+
+ + + Google, Inc +
+ fenix@google.com +
+
+ + + Mozilla +
+ + 331 E Evelyn Street + Mountain View + CA + 94041 + US + + martin.thomson@gmail.com +
+
+ + + Applications + HTTPbis + HTTP + SPDY + Web + + + + This specification describes an optimized expression of the semantics of the Hypertext + Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a + reduced perception of latency by introducing header field compression and allowing multiple + concurrent messages on the same connection. It also introduces unsolicited push of + representations from servers to clients. + + + This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. + HTTP's existing semantics remain unchanged. + + + + + + Discussion of this draft takes place on the HTTPBIS working group mailing list + (ietf-http-wg@w3.org), which is archived at . + + + Working Group information can be found at ; that specific to HTTP/2 are at . + + + The changes in this draft are summarized in . + + + +
+ + +
+ + + The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the + HTTP/1.1 message format () has + several characteristics that have a negative overall effect on application performance + today. + + + In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given + TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed + request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 + clients that need to make many requests typically use multiple connections to a server in + order to achieve concurrency and thereby reduce latency. + + + Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary + network traffic, as well as causing the initial TCP congestion + window to quickly fill. This can result in excessive latency when multiple requests are + made on a new TCP connection. + + + HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an + underlying connection. Specifically, it allows interleaving of request and response + messages on the same connection and uses an efficient coding for HTTP header fields. It + also allows prioritization of requests, letting more important requests complete more + quickly, further improving performance. + + + The resulting protocol is more friendly to the network, because fewer TCP connections can + be used in comparison to HTTP/1.x. This means less competition with other flows, and + longer-lived connections, which in turn leads to better utilization of available network + capacity. + + + Finally, HTTP/2 also enables more efficient processing of messages through use of binary + message framing. + +
+ +
+ + HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core + features of HTTP/1.1, but aims to be more efficient in several ways. + + + The basic protocol unit in HTTP/2 is a frame. Each frame + type serves a different purpose. For example, HEADERS and + DATA frames form the basis of HTTP requests and + responses; other frame types like SETTINGS, + WINDOW_UPDATE, and PUSH_PROMISE are used in support of other + HTTP/2 features. + + + Multiplexing of requests is achieved by having each HTTP request-response exchange + associated with its own stream. Streams are largely + independent of each other, so a blocked or stalled request or response does not prevent + progress on other streams. + + + Flow control and prioritization ensure that it is possible to efficiently use multiplexed + streams. Flow control helps to ensure that only data that + can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed + to the most important streams first. + + + HTTP/2 adds a new interaction mode, whereby a server can push + responses to a client. Server push allows a server to speculatively send a client + data that the server anticipates the client will need, trading off some network usage + against a potential latency gain. The server does this by synthesizing a request, which it + sends as a PUSH_PROMISE frame. The server is then able to send a response to + the synthetic request on a separate stream. + + + Frames that contain HTTP header fields are compressed. + HTTP requests can be highly redundant, so compression can reduce the size of requests and + responses significantly. + + +
+ + The HTTP/2 specification is split into four parts: + + + Starting HTTP/2 covers how an HTTP/2 connection is + initiated. + + + The framing and streams layers describe the way HTTP/2 frames are + structured and formed into multiplexed streams. + + + Frame and error + definitions include details of the frame and error types used in HTTP/2. + + + HTTP mappings and additional + requirements describe how HTTP semantics are expressed using frames and + streams. + + + + + While some of the frame and stream layer concepts are isolated from HTTP, this + specification does not define a completely generic framing layer. The framing and streams + layers are tailored to the needs of the HTTP protocol and server push. + +
+ +
+ + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. + + All numeric values are in network byte order. Values are unsigned unless otherwise indicated. Literal values are provided in decimal or hexadecimal as appropriate. Hexadecimal literals are prefixed with 0x to distinguish them from decimal literals. + + The following terms are used: + + client: The endpoint initiating the HTTP/2 connection. + + connection: A transport-layer connection between two endpoints. + + connection error: An error that affects the entire HTTP/2 connection. + + endpoint: Either the client or server of the connection. + + frame: The smallest unit of communication within an HTTP/2 connection, consisting of a header and a variable-length sequence of octets structured according to the frame type. + + peer: An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint that is remote to the primary subject of discussion. + + receiver: An endpoint that is receiving frames. + + sender: An endpoint that is transmitting frames. + + server: The endpoint which did not initiate the HTTP/2 connection. + + stream: A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. + + stream error: An error on the individual HTTP/2 stream. + + Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined in .
+
+ +
+ + An HTTP/2 connection is an application layer protocol running on top of a TCP connection + (). The client is the TCP connection initiator. + + + HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same + default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, + implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the + upstream server (the immediate peer to which the client wishes to establish a connection) + supports HTTP/2. + + + + The means by which support for HTTP/2 is determined is different for "http" and "https" + URIs. Discovery for "http" URIs is described in . Discovery + for "https" URIs is described in . + + +
+ + The protocol defined in this document has two identifiers. + + + + The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) + field and any place that HTTP/2 over TLS is identified. + + + The "h2" string is serialized into an ALPN protocol identifier as the two octet + sequence: 0x68, 0x32. + + + + + The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. + This identifier is used in the HTTP/1.1 Upgrade header field and any place that + HTTP/2 over TCP is identified. + + + + + + Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message + semantics described in this document. + + + RFC Editor's Note: please remove the remainder of this section prior to the + publication of a final version of this document. + + + Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". + Until such an RFC exists, implementations MUST NOT identify themselves using these + strings. + + + Examples and text throughout the rest of this document use "h2" as a matter of + editorial convenience only. Implementations of draft versions MUST NOT identify using + this string. + + + Implementations of draft versions of the protocol MUST add the string "-" and the + corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 + over TLS is identified using the string "h2-11". + + + Non-compatible experiments that are based on these draft versions MUST append the string + "-" and an experiment name to the identifier. For example, an experimental implementation + of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself + as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in + . Experimenters are + encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. + +
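To make the TLS side of this negotiation concrete, here is a minimal Go sketch using the standard crypto/tls package; the listenH2 helper and its certFile/keyFile parameters are illustrative names, not part of the vendored sources. The server offers "h2" via ALPN and falls back to HTTP/1.1; "h2c" is deliberately absent, since that token only identifies cleartext HTTP/2 and MUST NOT be used with TLS.

package example

import (
	"crypto/tls"
	"net"
)

// listenH2 returns a TLS listener that advertises HTTP/2 via the ALPN
// extension, with HTTP/1.1 as the fallback protocol.
func listenH2(addr, certFile, keyFile string) (net.Listener, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		// "h2" is the ALPN identifier for HTTP/2 over TLS;
		// "h2c" never appears here.
		NextProtos: []string{"h2", "http/1.1"},
	}
	return tls.Listen("tcp", addr, cfg)
}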
+ +
+ + A client that makes a request for an "http" URI without prior knowledge about support for + HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade + header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include + exactly one HTTP2-Settings header field. + +
+ For example:
+
+     GET / HTTP/1.1
+     Host: server.example.com
+     Connection: Upgrade, HTTP2-Settings
+     Upgrade: h2c
+     HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
+
+ + Requests that contain an entity body MUST be sent in their entirety before the client can + send HTTP/2 frames. This means that a large request entity can block the use of the + connection until it is completely sent. + + + If concurrency of an initial request with subsequent requests is important, an OPTIONS + request can be used to perform the upgrade to HTTP/2, at the cost of an additional + round-trip. + + + A server that does not support HTTP/2 can respond to the request as though the Upgrade + header field were absent: + +
+ +HTTP/1.1 200 OK +Content-Length: 243 +Content-Type: text/html + +... + +
+ + A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with + "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . + + + A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) + response. After the empty line that terminates the 101 response, the server can begin + sending HTTP/2 frames. These frames MUST include a response to the request that initiated + the Upgrade. + + +
+ + For example: + + +HTTP/1.1 101 Switching Protocols +Connection: Upgrade +Upgrade: h2c + +[ HTTP/2 connection ... + +
+ + The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a + SETTINGS frame. + + + The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is + assigned default priority values. Stream 1 is + implicitly half closed from the client toward the server, since the request is completed + as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the + response. + + +
+ + A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field + that includes parameters that govern the HTTP/2 connection, provided in anticipation of + the server accepting the request to upgrade. + +
+ +
+ + A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, + or if more than one is present. A server MUST NOT send this header field. + + + + The content of the HTTP2-Settings header field is the + payload of a SETTINGS frame (), encoded as a + base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The + ABNF production for token68 is + defined in . + + + Since the upgrade is only intended to apply to the immediate connection, a client + sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded + downstream. + + + A server decodes and interprets these values as it would any other + SETTINGS frame. Acknowledgement of the + SETTINGS parameters is not necessary, since a 101 response serves as implicit + acknowledgment. Providing these values in the Upgrade request gives a client an + opportunity to provide parameters prior to receiving any frames from the server. + +
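A sketch of the encoding just described, assuming Go's standard library: each setting is six octets (a 16-bit identifier followed by a 32-bit value), and the concatenated payload is base64url-encoded with trailing '=' padding omitted, which is exactly what base64.RawURLEncoding produces. The http2SettingsToken helper is an illustrative name, not part of this package.

package example

import (
	"encoding/base64"
	"encoding/binary"
)

// http2SettingsToken serializes SETTINGS entries and encodes them as
// the token68 value carried in the HTTP2-Settings header field.
func http2SettingsToken(settings map[uint16]uint32) string {
	payload := make([]byte, 0, 6*len(settings))
	for id, val := range settings {
		var entry [6]byte
		binary.BigEndian.PutUint16(entry[0:2], id)
		binary.BigEndian.PutUint32(entry[2:6], val)
		payload = append(payload, entry[:]...)
	}
	// base64url with '=' padding omitted, per the text above.
	return base64.RawURLEncoding.EncodeToString(payload)
}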
+
+ +
+ + A client that makes a request to an "https" URI uses TLS + with the application layer protocol negotiation extension. + + + HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a + client or selected by a server. + + + Once TLS negotiation is complete, both the client and the server send a connection preface. + +
+ +
+ + A client can learn that a particular server supports HTTP/2 by other means. For example, + describes a mechanism for advertising this capability. + + + A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, + after the connection preface; a server can + identify such a connection by the presence of the connection preface. This only affects + the establishment of HTTP/2 connections over cleartext TCP; implementations that support + HTTP/2 over TLS MUST use protocol negotiation in TLS. + + + Without additional information, prior support for HTTP/2 is not a strong signal that a + given server will support HTTP/2 for future connections. For example, it is possible for + server configurations to change, for configurations to differ between instances in + clustered servers, or for network conditions to change. + +
+ +
+ + Upon establishment of a TCP connection and determination that HTTP/2 will be used by both + peers, each endpoint MUST send a connection preface as a final confirmation and to + establish the initial SETTINGS parameters for the HTTP/2 connection. The client and + server each send a different connection preface. + + + The client connection preface starts with a sequence of 24 octets, which in hex notation + are: + +
+ 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
+ + (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence + is followed by a SETTINGS frame (). The + SETTINGS frame MAY be empty. The client sends the client connection + preface immediately upon receipt of a 101 Switching Protocols response (indicating a + successful upgrade), or as the first application data octets of a TLS connection. If + starting an HTTP/2 connection with prior knowledge of server support for the protocol, the + client connection preface is sent upon connection establishment. + + + + + The client connection preface is selected so that a large proportion of HTTP/1.1 or + HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note + that this does not address the concerns raised in . + + + + + The server connection preface consists of a potentially empty SETTINGS + frame () that MUST be the first frame the server sends in the + HTTP/2 connection. + + + The SETTINGS frames received from a peer as part of the connection preface + MUST be acknowledged (see ) after sending the connection + preface. + + + To avoid unnecessary latency, clients are permitted to send additional frames to the + server immediately after sending the client connection preface, without waiting to receive + the server connection preface. It is important to note, however, that the server + connection preface SETTINGS frame might include parameters that necessarily + alter how a client is expected to communicate with the server. Upon receiving the + SETTINGS frame, the client is expected to honor any parameters established. + In some configurations, it is possible for the server to transmit SETTINGS + before the client sends additional frames, providing an opportunity to avoid this issue. + + + Clients and servers MUST treat an invalid connection preface as a connection error of type + PROTOCOL_ERROR. A GOAWAY frame () + MAY be omitted in this case, since an invalid preface indicates that the peer is not using + HTTP/2. + +
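The preface check reduces to a fixed byte comparison; a minimal Go sketch follows (readPreface is an illustrative helper, not part of the vendored package).

package example

import (
	"bytes"
	"errors"
	"io"
)

// clientPreface is the fixed 24-octet client connection preface.
const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

// readPreface consumes and verifies the preface bytes; per the text
// above, an invalid preface is a connection error of type
// PROTOCOL_ERROR, and GOAWAY may be omitted.
func readPreface(r io.Reader) error {
	buf := make([]byte, len(clientPreface))
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	if !bytes.Equal(buf, []byte(clientPreface)) {
		return errors.New("PROTOCOL_ERROR: invalid client connection preface")
	}
	return nil
}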
+
+ +
+ + Once the HTTP/2 connection is established, endpoints can begin exchanging frames. + + +
+ + All frames begin with a fixed 9-octet header followed by a variable-length payload. + +
+  +-----------------------------------------------+
+  |                 Length (24)                   |
+  +---------------+---------------+---------------+
+  |   Type (8)    |   Flags (8)   |
+  +-+-------------+---------------+-------------------------------+
+  |R|                 Stream Identifier (31)                      |
+  +=+=============================================================+
+  |                   Frame Payload (0...)                      ...
+  +---------------------------------------------------------------+
+ + The fields of the frame header are defined as: + + Length: The length of the frame payload expressed as an unsigned 24-bit integer. Values + greater than 2^14 (16,384) MUST NOT be sent unless the receiver has + set a larger value for SETTINGS_MAX_FRAME_SIZE. + + The 9 octets of the frame header are not included in this value. + + Type: The 8-bit type of the frame. The frame type determines the format and semantics of + the frame. Implementations MUST ignore and discard any frame that has a type that + is unknown. + + Flags: An 8-bit field reserved for frame-type specific boolean flags. + + Flags are assigned semantics specific to the indicated frame type. Flags that have + no defined semantics for a particular frame type MUST be ignored, and MUST be left + unset (0) when sending. + + R: A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST + remain unset (0) when sending and MUST be ignored when receiving. + + Stream Identifier: A 31-bit stream identifier (see ). The value 0 is + reserved for frames that are associated with the connection as a whole as opposed to + an individual stream. + + The structure and content of the frame payload is dependent entirely on the frame type.
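Decoding the 9-octet header above is mechanical; a minimal Go sketch, with frameHeader and parseFrameHeader as illustrative names rather than this package's API:

package example

import "encoding/binary"

type frameHeader struct {
	Length   uint32 // 24-bit payload length; the 9 header octets are not counted
	Type     uint8  // frames of unknown type are ignored and discarded
	Flags    uint8  // frame-type specific flags
	StreamID uint32 // 31 bits; 0 means the frame applies to the connection
}

func parseFrameHeader(buf [9]byte) frameHeader {
	return frameHeader{
		Length: uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
		Type:   buf[3],
		Flags:  buf[4],
		// Mask off the reserved R bit, which MUST be ignored on receipt.
		StreamID: binary.BigEndian.Uint32(buf[5:9]) & (1<<31 - 1),
	}
}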
+ +
+ + The size of a frame payload is limited by the maximum size that a receiver advertises in + the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value + between 2^14 (16,384) and 2^24-1 (16,777,215) octets, + inclusive. + + All implementations MUST be capable of receiving and minimally processing frames up to + 2^14 octets in length, plus the 9 octet frame + header. The size of the frame header is not included when describing frame sizes. + + Certain frame types, such as PING, impose additional limits + on the amount of payload data allowed. + + If a frame size exceeds any defined limit, or is too small to contain mandatory frame + data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error + in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying + a header block (that is, HEADERS, + PUSH_PROMISE, and CONTINUATION), SETTINGS, + and any WINDOW_UPDATE frame with a stream identifier of 0. + + Endpoints are not obligated to use all available space in a frame. Responsiveness can be + improved by using frames that are smaller than the permitted maximum size. Sending large + frames can result in delays in sending time-sensitive frames (such as + RST_STREAM, WINDOW_UPDATE, or PRIORITY), + which, if blocked by the transmission of a large frame, could affect performance.
+ +
+ + Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. + They are used within HTTP request and response messages as well as server push operations + (see ). + + Header lists are collections of zero or more header fields. When transmitted over a + connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then + divided into one or more octet sequences, called header block fragments, and transmitted + within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. + + The Cookie header field is treated specially by the HTTP + mapping (see ). + + A receiving endpoint reassembles the header block by concatenating its fragments, then + decompresses the block to reconstruct the header list. + + A complete header block consists of either: + + a single HEADERS or PUSH_PROMISE frame, + with the END_HEADERS flag set, or + + a HEADERS or PUSH_PROMISE frame with the END_HEADERS + flag cleared and one or more CONTINUATION frames, + where the last CONTINUATION frame has the END_HEADERS flag set. + + Header compression is stateful. One compression context and one decompression context are + used for the entire connection. Each header block is processed as a discrete unit. + Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved + frames of any other type or from any other stream. The last frame in a sequence of + HEADERS or CONTINUATION frames MUST have the END_HEADERS + flag set. The last frame in a sequence of PUSH_PROMISE or + CONTINUATION frames MUST have the END_HEADERS flag set. This allows a + header block to be logically equivalent to a single frame. + + Header block fragments can only be sent as the payload of HEADERS, + PUSH_PROMISE or CONTINUATION frames, because these frames + carry data that can modify the compression context maintained by a receiver. An endpoint + receiving HEADERS, PUSH_PROMISE or + CONTINUATION frames MUST reassemble header blocks and perform decompression + even if the frames are to be discarded. A receiver MUST terminate the connection with a + connection error of type + COMPRESSION_ERROR if it does not decompress a header block.
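A sketch of the reassembly rule above, in Go; the fragment type is an illustrative stand-in for the payloads of HEADERS, PUSH_PROMISE and CONTINUATION frames, not this package's API:

package example

// fragment carries one header block fragment and whether the frame
// that carried it had the END_HEADERS flag set.
type fragment struct {
	data       []byte
	endHeaders bool
}

// assembleHeaderBlock concatenates contiguous fragments into the
// complete header block. Note that the assembled block must still be
// decompressed even when the frames are to be discarded, so that the
// connection-wide compression context stays in sync.
func assembleHeaderBlock(frags []fragment) ([]byte, bool) {
	var block []byte
	for _, f := range frags {
		block = append(block, f.data...)
		if f.endHeaders {
			return block, true
		}
	}
	return nil, false // END_HEADERS never seen: block is incomplete
}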
+
+ +
+ + A "stream" is an independent, bi-directional sequence of frames exchanged between the client + and server within an HTTP/2 connection. Streams have several important characteristics: + + + A single HTTP/2 connection can contain multiple concurrently open streams, with either + endpoint interleaving frames from multiple streams. + + + Streams can be established and used unilaterally or shared by either the client or + server. + + + Streams can be closed by either endpoint. + + + The order in which frames are sent on a stream is significant. Recipients process frames + in the order they are received. In particular, the order of HEADERS, + and DATA frames is semantically significant. + + + Streams are identified by an integer. Stream identifiers are assigned to streams by the + endpoint initiating the stream. + + + + +
+ + The lifecycle of a stream is shown in . + + +
+ + [stream lifecycle diagram: "idle" becomes "open" via HEADERS (H) or "reserved (local/remote)" via PUSH_PROMISE (PP); an END_STREAM flag (ES) moves an open stream to "half closed (local/remote)"; RST_STREAM (R), or an ES in the other direction, moves streams to "closed"] + + H: HEADERS frame (with implied CONTINUATIONs) + PP: PUSH_PROMISE frame (with implied CONTINUATIONs) + ES: END_STREAM flag + R: RST_STREAM frame
+ + + Note that this diagram shows stream state transitions and the frames and flags that affect + those transitions only. In this regard, CONTINUATION frames do not result + in state transitions; they are effectively part of the HEADERS or + PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is + processed as a separate event to the frame that bears it; a HEADERS frame + with the END_STREAM flag set can cause two state transitions. + + + Both endpoints have a subjective view of the state of a stream that could be different + when frames are in transit. Endpoints do not coordinate the creation of streams; they are + created unilaterally by either endpoint. The negative consequences of a mismatch in + states are limited to the "closed" state after sending RST_STREAM, where + frames might be received for some time after closing. + + + Streams have the following states: + + + + + + All streams start in the "idle" state. In this state, no frames have been + exchanged. + + + The following transitions are valid from this state: + + + Sending or receiving a HEADERS frame causes the stream to become + "open". The stream identifier is selected as described in . The same HEADERS frame can also + cause a stream to immediately become "half closed". + + + Sending a PUSH_PROMISE frame marks the associated stream for + later use. The stream state for the reserved stream transitions to "reserved + (local)". + + + Receiving a PUSH_PROMISE frame marks the associated stream as + reserved by the remote peer. The state of the stream becomes "reserved + (remote)". + + + + + Receiving any frames other than HEADERS or + PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (local)" state is one that has been promised by sending a + PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an + idle stream by associating the stream with an open stream that was initiated by the + remote peer (see ). + + + In this state, only the following transitions are possible: + + + The endpoint can send a HEADERS frame. This causes the stream to + open in a "half closed (remote)" state. + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MUST NOT send any type of frame other than HEADERS or + RST_STREAM in this state. + + + A PRIORITY frame MAY be received in this state. Receiving any type + of frame other than RST_STREAM or PRIORITY on a stream + in this state MUST be treated as a connection + error of type PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (remote)" state has been reserved by a remote peer. + + + In this state, only the following transitions are possible: + + + Receiving a HEADERS frame causes the stream to transition to + "half closed (local)". + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MAY send a PRIORITY frame in this state to reprioritize + the reserved stream. An endpoint MUST NOT send any type of frame other than + RST_STREAM, WINDOW_UPDATE, or PRIORITY + in this state. + + + Receiving any type of frame other than HEADERS or + RST_STREAM on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "open" state may be used by both peers to send frames of any type. 
+ In this state, sending peers observe advertised stream + level flow control limits. + + + From this state either endpoint can send a frame with an END_STREAM flag set, which + causes the stream to transition into one of the "half closed" states: an endpoint + sending an END_STREAM flag causes the stream state to become "half closed (local)"; + an endpoint receiving an END_STREAM flag causes the stream state to become "half + closed (remote)". + + + Either endpoint can send a RST_STREAM frame from this state, causing + it to transition immediately to "closed". + + + + + + + A stream that is in the "half closed (local)" state cannot be used for sending + frames. Only WINDOW_UPDATE, PRIORITY and + RST_STREAM frames can be sent in this state. + + + A stream transitions from this state to "closed" when a frame that contains an + END_STREAM flag is received, or when either peer sends a RST_STREAM + frame. + + + A receiver can ignore WINDOW_UPDATE frames in this state, which might + arrive for a short period after a frame bearing the END_STREAM flag is sent. + + + PRIORITY frames received in this state are used to reprioritize + streams that depend on the current stream. + + + + + + + A stream that is "half closed (remote)" is no longer being used by the peer to send + frames. In this state, an endpoint is no longer obligated to maintain a receiver + flow control window if it performs flow control. + + + If an endpoint receives additional frames for a stream that is in this state, other + than WINDOW_UPDATE, PRIORITY or + RST_STREAM, it MUST respond with a stream error of type + STREAM_CLOSED. + + + A stream that is "half closed (remote)" can be used by the endpoint to send frames + of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. + + + A stream can transition from this state to "closed" by sending a frame that contains + an END_STREAM flag, or when either peer sends a RST_STREAM frame. + + + + + + + The "closed" state is the terminal state. + + + An endpoint MUST NOT send frames other than PRIORITY on a closed + stream. An endpoint that receives any frame other than PRIORITY + after receiving a RST_STREAM MUST treat that as a stream error of type + STREAM_CLOSED. Similarly, an endpoint that receives any frames after + receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type + STREAM_CLOSED, unless the frame is permitted as described below. + + + WINDOW_UPDATE or RST_STREAM frames can be received in + this state for a short period after a DATA or HEADERS + frame containing an END_STREAM flag is sent. Until the remote peer receives and + processes RST_STREAM or the frame bearing the END_STREAM flag, it + might send frames of these types. Endpoints MUST ignore + WINDOW_UPDATE or RST_STREAM frames received in this + state, though endpoints MAY choose to treat frames that arrive a significant time + after sending END_STREAM as a connection + error of type PROTOCOL_ERROR. + + + PRIORITY frames can be sent on closed streams to prioritize streams + that are dependent on the closed stream. Endpoints SHOULD process + PRIORITY frame, though they can be ignored if the stream has been + removed from the dependency tree (see ). + + + If this state is reached as a result of sending a RST_STREAM frame, + the peer that receives the RST_STREAM might have already sent - or + enqueued for sending - frames on the stream that cannot be withdrawn. 
An endpoint + MUST ignore frames that it receives on closed streams after it has sent a + RST_STREAM frame. An endpoint MAY choose to limit the period over + which it ignores frames and treat frames that arrive after this time as being in + error. + + Flow controlled frames (i.e., DATA) received after sending + RST_STREAM are counted toward the connection flow control window. + Even though these frames might be ignored, because they are sent before the sender + receives the RST_STREAM, the sender will consider the frames to count + against the flow control window. + + An endpoint might receive a PUSH_PROMISE frame after it sends + RST_STREAM. PUSH_PROMISE causes a stream to become + "reserved" even if the associated stream has been reset. Therefore, a + RST_STREAM is needed to close an unwanted promised stream. + + In the absence of more specific guidance elsewhere in this document, implementations + SHOULD treat the receipt of a frame that is not expressly permitted in the description of + a state as a connection error of type + PROTOCOL_ERROR. Frames of unknown types are ignored. + + An example of the state transitions for an HTTP request/response exchange can be found in + . An example of the state transitions for server push can be + found in and . + +
+ + Streams are identified with an unsigned 31-bit integer. Streams initiated by a client + MUST use odd-numbered stream identifiers; those initiated by the server MUST use + even-numbered stream identifiers. A stream identifier of zero (0x0) is used for + connection control messages; the stream identifier zero cannot be used to establish a + new stream. + + + HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are + responded to with a stream identifier of one (0x1). After the upgrade + completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 + cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. + + + The identifier of a newly established stream MUST be numerically greater than all + streams that the initiating endpoint has opened or reserved. This governs streams that + are opened using a HEADERS frame and streams that are reserved using + PUSH_PROMISE. An endpoint that receives an unexpected stream identifier + MUST respond with a connection error of + type PROTOCOL_ERROR. + + + The first use of a new stream identifier implicitly closes all streams in the "idle" + state that might have been initiated by that peer with a lower-valued stream identifier. + For example, if a client sends a HEADERS frame on stream 7 without ever + sending a frame on stream 5, then stream 5 transitions to the "closed" state when the + first frame for stream 7 is sent or received. + + + Stream identifiers cannot be reused. Long-lived connections can result in an endpoint + exhausting the available range of stream identifiers. A client that is unable to + establish a new stream identifier can establish a new connection for new streams. A + server that is unable to establish a new stream identifier can send a + GOAWAY frame so that the client is forced to open a new connection for + new streams. + +
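From a server's point of view, the identifier rules amount to a simple parity and monotonicity check; a Go sketch with illustrative names (conn, checkNewClientStream), not part of the vendored package:

package example

import "errors"

type conn struct {
	lastClientStreamID uint32 // highest client-initiated stream seen so far
}

// checkNewClientStream enforces that client streams are odd-numbered,
// nonzero, and numerically greater than all earlier client streams;
// anything else is a connection error of type PROTOCOL_ERROR.
func (c *conn) checkNewClientStream(id uint32) error {
	if id == 0 || id%2 == 0 || id <= c.lastClientStreamID {
		return errors.New("PROTOCOL_ERROR: unexpected stream identifier")
	}
	c.lastClientStreamID = id
	return nil
}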
+ +
+ + A peer can limit the number of concurrently active streams using the + SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent + streams setting is specific to each endpoint and applies only to the peer that receives + the setting. That is, clients specify the maximum number of concurrent streams the + server can initiate, and servers specify the maximum number of concurrent streams the + client can initiate. + + Streams that are in the "open" state, or either of the "half closed" states count toward + the maximum number of streams that an endpoint is permitted to open. Streams in any of + these three states count toward the limit advertised in the + SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the + "reserved" states do not count toward the stream limit. + + Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a + HEADERS frame that causes its advertised concurrent stream limit to be + exceeded MUST treat this as a stream error. An + endpoint that wishes to reduce the value of + SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current + number of open streams can either close streams that exceed the new value or allow + streams to complete.
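A sketch of this accounting in Go; the streamState type and helpers are illustrative, not this package's API:

package example

type streamState int

const (
	stateIdle streamState = iota
	stateReservedLocal
	stateReservedRemote
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateClosed
)

// countsTowardLimit reports whether a stream occupies a slot under
// SETTINGS_MAX_CONCURRENT_STREAMS: open and both half closed states
// count; idle, reserved, and closed streams do not.
func countsTowardLimit(s streamState) bool {
	return s == stateOpen || s == stateHalfClosedLocal || s == stateHalfClosedRemote
}

// canOpen reports whether the peer may initiate one more stream.
func canOpen(streams map[uint32]streamState, max uint32) bool {
	var active uint32
	for _, s := range streams {
		if countsTowardLimit(s) {
			active++
		}
	}
	return active < max
}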
+
+ +
+ + Using streams for multiplexing introduces contention over use of the TCP connection, + resulting in blocked streams. A flow control scheme ensures that streams on the same + connection do not destructively interfere with each other. Flow control is used for both + individual streams and for the connection as a whole. + + + HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. + + +
+ + HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be + used without requiring protocol changes. Flow control in HTTP/2 has the following + characteristics: + + + Flow control is specific to a connection; i.e., it is "hop-by-hop", not + "end-to-end". + + + Flow control is based on window update frames. Receivers advertise how many octets + they are prepared to receive on a stream and for the entire connection. This is a + credit-based scheme. + + + Flow control is directional with overall control provided by the receiver. A + receiver MAY choose to set any window size that it desires for each stream and for + the entire connection. A sender MUST respect flow control limits imposed by a + receiver. Clients, servers and intermediaries all independently advertise their + flow control window as a receiver and abide by the flow control limits set by + their peer when sending. + + + The initial value for the flow control window is 65,535 octets for both new streams + and the overall connection. + + + The frame type determines whether flow control applies to a frame. Of the frames + specified in this document, only DATA frames are subject to flow + control; all other frame types do not consume space in the advertised flow control + window. This ensures that important control frames are not blocked by flow control. + + + Flow control cannot be disabled. + + + HTTP/2 defines only the format and semantics of the WINDOW_UPDATE + frame (). This document does not stipulate how a + receiver decides when to send this frame or the value that it sends, nor does it + specify how a sender chooses to send packets. Implementations are able to select + any algorithm that suits their needs. + + + + + Implementations are also responsible for managing how requests and responses are sent + based on priority; choosing how to avoid head of line blocking for requests; and + managing the creation of new streams. Algorithm choices for these could interact with + any flow control algorithm. + +
+ +
+ + Flow control is defined to protect endpoints that are operating under resource + constraints. For example, a proxy needs to share memory between many connections, and + also might have a slow upstream connection and a fast downstream one. Flow control + addresses cases where the receiver is unable to process data on one stream, yet wants to + continue to process other streams in the same connection. + + Deployments that do not require this capability can advertise a flow control window of + the maximum size, incrementing the available space when new data is received. This + effectively disables flow control for that receiver. Conversely, a sender is always + subject to the flow control window advertised by the receiver. + + Deployments with constrained resources (for example, memory) can employ flow control to + limit the amount of memory a peer can consume. Note, however, that this can lead to + suboptimal use of available network resources if flow control is enabled without + knowledge of the bandwidth-delay product (see ). + + Even with full awareness of the current bandwidth-delay product, implementation of flow + control can be difficult. When using flow control, the receiver MUST read from the TCP + receive buffer in a timely fashion. Failure to do so could lead to a deadlock when + critical frames, such as WINDOW_UPDATE, are not read and acted upon.
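Sender-side accounting for this credit-based scheme can be sketched in a few lines of Go (flowWindow is an illustrative type; the initial value is the 65,535 octets noted earlier):

package example

import "errors"

// flowWindow tracks how many octets of DATA the peer is currently
// willing to accept; only DATA frames consume window space.
type flowWindow struct {
	avail int32 // starts at 65535 for each stream and for the connection
}

// consume is checked before sending a DATA frame of n octets.
func (w *flowWindow) consume(n int32) error {
	if n > w.avail {
		return errors.New("DATA would exceed the advertised flow control window")
	}
	w.avail -= n
	return nil
}

// add applies a WINDOW_UPDATE increment from the peer, guarding
// against overflow of the maximum window size of 2^31-1 octets.
func (w *flowWindow) add(n int32) error {
	if int64(w.avail)+int64(n) > 1<<31-1 {
		return errors.New("FLOW_CONTROL_ERROR: window overflow")
	}
	w.avail += n
	return nil
}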
+
+ +
+ + A client can assign a priority for a new stream by including prioritization information in + the HEADERS frame that opens the stream. For an existing + stream, the PRIORITY frame can be used to change the + priority. + + + The purpose of prioritization is to allow an endpoint to express how it would prefer its + peer allocate resources when managing concurrent streams. Most importantly, priority can + be used to select streams for transmitting frames when there is limited capacity for + sending. + + + Streams can be prioritized by marking them as dependent on the completion of other streams + (). Each dependency is assigned a relative weight, a number + that is used to determine the relative proportion of available resources that are assigned + to streams dependent on the same stream. + + + + Explicitly setting the priority for a stream is input to a prioritization process. It + does not guarantee any particular processing or transmission order for the stream relative + to any other stream. An endpoint cannot force a peer to process concurrent streams in a + particular order using priority. Expressing priority is therefore only ever a suggestion. + + + Providing prioritization information is optional, so default values are used if no + explicit indicator is provided (). + + +
+ + Each stream can be given an explicit dependency on another stream. Including a + dependency expresses a preference to allocate resources to the identified stream rather + than to the dependent stream. + + + A stream that is not dependent on any other stream is given a stream dependency of 0x0. + In other words, the non-existent stream 0 forms the root of the tree. + + + A stream that depends on another stream is a dependent stream. The stream upon which a + stream is dependent is a parent stream. A dependency on a stream that is not currently + in the tree - such as a stream in the "idle" state - results in that stream being given + a default priority. + + + When assigning a dependency on another stream, the stream is added as a new dependency + of the parent stream. Dependent streams that share the same parent are not ordered with + respect to each other. For example, if streams B and C are dependent on stream A, and + if stream D is created with a dependency on stream A, this results in a dependency order + of A followed by B, C, and D in any order. + +
+        A                 A
+       / \      ==>      /|\
+      B   C             B D C
+ + An exclusive flag allows for the insertion of a new level of dependencies. The + exclusive flag causes the stream to become the sole dependency of its parent stream, + causing other dependencies to become dependent on the exclusive stream. In the + previous example, if stream D is created with an exclusive dependency on stream A, this + results in D becoming the dependency parent of B and C. + +
+                      A
+        A             |
+       / \    ==>     D
+      B   C          / \
+                    B   C
+ + Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all + of the streams that it depends on (the chain of parent streams up to 0x0) are either + closed, or it is not possible to make progress on them. + + + A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. + +
+ +
+ + All dependent streams are allocated an integer weight between 1 and 256 (inclusive). + + + Streams with the same parent SHOULD be allocated resources proportionally based on their + weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A + with weight 12, and if no progress can be made on A, stream B ideally receives one third + of the resources allocated to stream C. + +
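The weighting rule is plain proportional division; a Go sketch follows (shares is an illustrative helper, not part of this package). With B at weight 4 and C at weight 12 under the same parent, B gets 4/16 and C gets 12/16 of the parent's capacity, so B receives one third of what C receives, matching the example above.

package example

// shares converts sibling weights (each in the 1..256 range) into
// fractions of the shared parent's capacity.
func shares(weights map[uint32]int) map[uint32]float64 {
	total := 0
	for _, w := range weights {
		total += w
	}
	out := make(map[uint32]float64, len(weights))
	for id, w := range weights {
		out[id] = float64(w) / float64(total)
	}
	return out
}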
+ +
+ + Stream priorities are changed using the PRIORITY frame. Setting a + dependency causes a stream to become dependent on the identified parent stream. + + + Dependent streams move with their parent stream if the parent is reprioritized. Setting + a dependency with the exclusive flag for a reprioritized stream moves all the + dependencies of the new parent stream to become dependent on the reprioritized stream. + + + If a stream is made dependent on one of its own dependencies, the formerly dependent + stream is first moved to be dependent on the reprioritized stream's previous parent. + The moved dependency retains its weight. + +
+ + For example, consider an original dependency tree where B and C depend on A, D and E + depend on C, and F depends on D. If A is made dependent on D, then D takes the place + of A. All other dependency relationships stay the same, except for F, which becomes + dependent on A if the reprioritization is exclusive. + + [figure: three stages after A is made dependent on D — intermediate: D (keeping F) is first moved up to A's former parent; non-exclusive: A (keeping B and C, with E under C) becomes a dependency of D alongside F; exclusive: F also moves to become a dependency of A] + (intermediate) (non-exclusive) (exclusive)
+
+ +
+ + When a stream is removed from the dependency tree, its dependencies can be moved to + become dependent on the parent of the closed stream. The weights of new dependencies + are recalculated by distributing the weight of the dependency of the closed stream + proportionally based on the weights of its dependencies. + + + Streams that are removed from the dependency tree cause some prioritization information + to be lost. Resources are shared between streams with the same parent stream, which + means that if a stream in that set closes or becomes blocked, any spare capacity + allocated to a stream is distributed to the immediate neighbors of the stream. However, + if the common dependency is removed from the tree, those streams share resources with + streams at the next highest level. + + + For example, assume streams A and B share a parent, and streams C and D both depend on + stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, + then stream C receives all the resources dedicated to stream A. If stream A is removed + from the tree, the weight of stream A is divided between streams C and D. If stream D + is still unable to proceed, this results in stream C receiving a reduced proportion of + resources. For equal starting weights, C receives one third, rather than one half, of + available resources. + + + It is possible for a stream to become closed while prioritization information that + creates a dependency on that stream is in transit. If a stream identified in a + dependency has no associated priority information, then the dependent stream is instead + assigned a default priority. This potentially creates + suboptimal prioritization, since the stream could be given a priority that is different + to what is intended. + + + To avoid these problems, an endpoint SHOULD retain stream prioritization state for a + period after streams become closed. The longer state is retained, the lower the chance + that streams are assigned incorrect or default priority values. + + + This could create a large state burden for an endpoint, so this state MAY be limited. + An endpoint MAY apply a fixed upper limit on the number of closed streams for which + prioritization state is tracked to limit state exposure. The amount of additional state + an endpoint maintains could be dependent on load; under high load, prioritization state + can be discarded to limit resource commitments. In extreme cases, an endpoint could + even discard prioritization state for active or reserved streams. If a fixed limit is + applied, endpoints SHOULD maintain state for at least as many streams as allowed by + their setting for SETTINGS_MAX_CONCURRENT_STREAMS. + + + An endpoint receiving a PRIORITY frame that changes the priority of a + closed stream SHOULD alter the dependencies of the streams that depend on it, if it has + retained enough state to do so. + +
+ +
+ + Providing priority information is optional. Streams are assigned a non-exclusive + dependency on stream 0x0 by default. Pushed streams + initially depend on their associated stream. In both cases, streams are assigned a + default weight of 16. + +
+
+ +
+ + HTTP/2 framing permits two classes of error: + + + An error condition that renders the entire connection unusable is a connection error. + + + An error in an individual stream is a stream error. + + + + + A list of error codes is included in . + + +
+ + A connection error is any error which prevents further processing of the framing layer, + or which corrupts any connection state. + + + An endpoint that encounters a connection error SHOULD first send a GOAWAY + frame () with the stream identifier of the last stream that it + successfully received from its peer. The GOAWAY frame includes an error + code that indicates why the connection is terminating. After sending the + GOAWAY frame, the endpoint MUST close the TCP connection. + + + It is possible that the GOAWAY will not be reliably received by the + receiving endpoint (see ). In the event of a connection error, + GOAWAY only provides a best effort attempt to communicate with the peer + about why the connection is being terminated. + + + An endpoint can end a connection at any time. In particular, an endpoint MAY choose to + treat a stream error as a connection error. Endpoints SHOULD send a + GOAWAY frame when ending a connection, providing that circumstances + permit it. + +
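+
+   As a sketch of the sending side, assuming the vendored package's Framer with its
+   WriteGoAway helper and ErrCode type, a connection error handler reduces to a
+   best-effort GOAWAY followed by closing the transport:
+
+     package h2err
+
+     import (
+         "net"
+
+         "github.com/bradfitz/http2"
+     )
+
+     // onConnectionError sends a best-effort GOAWAY naming the last
+     // stream successfully received from the peer, then closes the
+     // TCP connection as required.
+     func onConnectionError(conn net.Conn, fr *http2.Framer, lastStream uint32, code http2.ErrCode) {
+         _ = fr.WriteGoAway(lastStream, code, nil) // delivery is best effort
+         conn.Close()
+     }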
+ +
+ + A stream error is an error related to a specific stream that does not affect processing + of other streams. + + + An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error + occurred. The RST_STREAM frame includes an error code that indicates the + type of error. + + + A RST_STREAM is the last frame that an endpoint can send on a stream. + The peer that sends the RST_STREAM frame MUST be prepared to receive any + frames that were sent or enqueued for sending by the remote peer. These frames can be + ignored, except where they modify connection state (such as the state maintained for + header compression, or flow control). + + + Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for + any stream. However, an endpoint MAY send additional RST_STREAM frames if + it receives frames on a closed stream after more than a round-trip time. This behavior + is permitted to deal with misbehaving implementations. + + + An endpoint MUST NOT send a RST_STREAM in response to an + RST_STREAM frame, to avoid looping. + +
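+
+   The stream error counterpart, under the same assumptions about the vendored Framer:
+
+     package h2err
+
+     import "github.com/bradfitz/http2"
+
+     // onStreamError resets one stream and leaves the connection
+     // intact. Frames already in flight from the peer on this stream
+     // must still be tolerated afterwards.
+     func onStreamError(fr *http2.Framer, streamID uint32, code http2.ErrCode) error {
+         return fr.WriteRSTStream(streamID, code)
+     }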
+ +
+ + If the TCP connection is closed or reset while streams remain in open or half closed + states, then the endpoint MUST assume that those streams were abnormally interrupted and + could be incomplete. + +
+
+ +
+ + HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide + additional services or alter any aspect of the protocol, within the limitations described + in this section. Extensions are effective only within the scope of a single HTTP/2 + connection. + + + Extensions are permitted to use new frame types, new + settings, or new error + codes. Registries are established for managing these extension points: frame types, settings and + error codes. + + + Implementations MUST ignore unknown or unsupported values in all extensible protocol + elements. Implementations MUST discard frames that have unknown or unsupported types. + This means that any of these extension points can be safely used by extensions without + prior arrangement or negotiation. However, extension frames that appear in the middle of + a header block are not permitted; these MUST be treated + as a connection error of type + PROTOCOL_ERROR. + + + However, extensions that could change the semantics of existing protocol components MUST + be negotiated before being used. For example, an extension that changes the layout of the + HEADERS frame cannot be used until the peer has given a positive signal + that this is acceptable. In this case, it could also be necessary to coordinate when the + revised layout comes into effect. Note that treating any frame other than + DATA frames as flow controlled is such a change in semantics, and can only + be done through negotiation. + + + This document doesn't mandate a specific method for negotiating the use of an extension, + but notes that a setting could be used for that + purpose. If both peers set a value that indicates willingness to use the extension, then + the extension can be used. If a setting is used for extension negotiation, the initial + value MUST be defined so that the extension is initially disabled. + +
+
+ +
+
+ This specification defines a number of frame types, each identified by a unique 8-bit type
+ code. Each frame type serves a distinct purpose either in the establishment and management
+ of the connection as a whole, or of individual streams.
+
+ The transmission of specific frame types can alter the state of a connection. If endpoints
+ fail to maintain a synchronized view of the connection state, successful communication
+ within the connection will no longer be possible. Therefore, it is important that endpoints
+ have a shared comprehension of how the state is affected by the use of any given frame.
+
+ + DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated + with a stream. One or more DATA frames are used, for instance, to carry HTTP request or + response payloads. + + + DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to + obscure the size of messages. + +
+ +
+ + The DATA frame contains the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is optional and is only present if the PADDED flag is set. + + + Application data. The amount of data is the remainder of the frame payload after + subtracting the length of the other fields that are present. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The DATA frame defines the following flags: + + + Bit 1 being set indicates that this frame is the last that the endpoint will send for + the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. + + + Bit 4 being set indicates that the Pad Length field and any padding that it describes + is present. + + + + + DATA frames MUST be associated with a stream. If a DATA frame is received whose stream + identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + DATA frames are subject to flow control and can only be sent when a stream is in the + "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow + control, including Pad Length and Padding fields if present. If a DATA frame is received + whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond + with a stream error of type + STREAM_CLOSED. + + + The total number of padding octets is determined by the value of the Pad Length field. If + the length of the padding is greater than the length of the frame payload, the recipient + MUST treat this as a connection error of + type PROTOCOL_ERROR. + + + A frame can be increased in size by one octet by including a Pad Length field with a + value of zero. + + + + + Padding is a security feature; see . + +
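+
+   The padding rules can be checked mechanically. A minimal Go sketch (names are
+   illustrative) that separates application data from padding in a DATA payload:
+
+     package frames
+
+     import "errors"
+
+     const flagPadded = 0x8 // bit 4 of the DATA frame flags
+
+     // dataPayload returns the application data of a DATA frame,
+     // validating the Pad Length field when the PADDED flag is set.
+     func dataPayload(flags byte, payload []byte) ([]byte, error) {
+         if flags&flagPadded == 0 {
+             return payload, nil
+         }
+         if len(payload) == 0 {
+             return nil, errors.New("missing Pad Length field")
+         }
+         padLen, rest := int(payload[0]), payload[1:]
+         if padLen > len(rest) {
+             // Connection error of type PROTOCOL_ERROR.
+             return nil, errors.New("padding exceeds frame payload")
+         }
+         return rest[:len(rest)-padLen], nil
+     }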
+ +
+ + The HEADERS frame (type=0x1) is used to open a stream, + and additionally carries a header block fragment. HEADERS frames can be sent on a stream + in the "open" or "half closed (remote)" states. + +
+ +
+
+ The HEADERS frame payload has the following fields:
+
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is only present if the PADDED flag is set.
+
+ A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set.
+
+ A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set.
+
+ An 8-bit weight for the stream, see . Add one to the
+ value to obtain a weight between 1 and 256. This field is only present if the
+ PRIORITY flag is set.
+
+ A header block fragment.
+
+ Padding octets that contain no application semantic value. Padding octets MUST be set
+ to zero when sending and ignored when receiving.
+
+
+ The HEADERS frame defines the following flags:
+
+
+ Bit 1 being set indicates that the header block is
+ the last that the endpoint will send for the identified stream. Setting this flag
+ causes the stream to enter one of the "half closed"
+ states.
+
+ A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
+ However, a HEADERS frame with the END_STREAM flag set can be followed by
+ CONTINUATION frames on the same stream. Logically, the
+ CONTINUATION frames are part of the HEADERS frame.
+
+
+ Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
+ CONTINUATION frames.
+
+ A HEADERS frame without the END_HEADERS flag set MUST be followed by a
+ CONTINUATION frame for the same stream. A receiver MUST treat the
+ receipt of any other type of frame or a frame on a different stream as a connection error of type
+ PROTOCOL_ERROR.
+
+
+ Bit 4 being set indicates that the Pad Length field and any padding that it
+ describes is present.
+
+
+ Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
+ fields are present; see .
+
+
+ The payload of a HEADERS frame contains a header block
+ fragment. A header block that does not fit within a HEADERS frame is continued in
+ a CONTINUATION frame.
+
+
+ HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
+ stream identifier field is 0x0, the recipient MUST respond with a connection error of type
+ PROTOCOL_ERROR.
+
+
+ The HEADERS frame changes the connection state as described in .
+
+
+ The HEADERS frame includes optional padding. Padding fields and flags are identical to
+ those defined for DATA frames.
+
+ Prioritization information in a HEADERS frame is logically equivalent to a separate
+ PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in
+ stream prioritization when new streams are created. Prioritization fields in HEADERS frames
+ subsequent to the first on a stream reprioritize the
+ stream.
+
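+
+   A Go sketch of the optional prioritization fields (illustrative, not the vendored
+   parser): the exclusive bit rides on the high bit of the 32-bit dependency word, and
+   the weight on the wire is one less than the effective weight:
+
+     package frames
+
+     import (
+         "encoding/binary"
+         "errors"
+     )
+
+     type priority struct {
+         exclusive bool
+         streamDep uint32
+         weight    int // 1..256 after adding one to the wire value
+     }
+
+     // parsePriority reads the five octets of prioritization data that
+     // follow the optional Pad Length field when PRIORITY is set.
+     func parsePriority(b []byte) (priority, []byte, error) {
+         if len(b) < 5 {
+             return priority{}, nil, errors.New("short priority fields")
+         }
+         v := binary.BigEndian.Uint32(b[:4])
+         return priority{
+             exclusive: v&(1<<31) != 0,
+             streamDep: v & 0x7fffffff,
+             weight:    int(b[4]) + 1,
+         }, b[5:], nil
+     }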
+ +
+ + The PRIORITY frame (type=0x2) specifies the sender-advised + priority of a stream. It can be sent at any time for an existing stream, including + closed streams. This enables reprioritization of existing streams. + +
+ +
+
+ The payload of a PRIORITY frame contains the following fields:
+
+ A single bit flag indicates that the stream dependency is exclusive, see .
+
+ A 31-bit stream identifier for the stream that this stream depends on, see .
+
+ An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256.
+
+
+ The PRIORITY frame does not define any flags.
+
+
+ The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
+ with a stream identifier of 0x0, the recipient MUST respond with a connection error of type
+ PROTOCOL_ERROR.
+
+ The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
+ "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
+ sent between consecutive frames that comprise a single header
+ block. Note that this frame could arrive after processing or frame sending has
+ completed, which would cause it to have no effect on the current stream. For a stream
+ that is in the "half closed (remote)" or "closed" state, this frame can only affect
+ processing of the current stream and not frame transmission.
+
+ The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
+ This allows for the reprioritization of a group of dependent streams by altering the
+ priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
+ closed stream risks being ignored due to the peer having discarded priority state
+ information for that stream.
+
+ +
+ + The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by + the initiator of a stream, it indicates that they wish to cancel the stream or that an + error condition has occurred. When sent by the receiver of a stream, it indicates that + either the receiver is rejecting the stream, requesting that the stream be cancelled, or + that an error condition has occurred. + +
+ +
+ + + The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being + terminated. + + + + The RST_STREAM frame does not define any flags. + + + + The RST_STREAM frame fully terminates the referenced stream and causes it to enter the + closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send + additional frames for that stream, with the exception of PRIORITY. However, + after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process + additional frames sent on the stream that might have been sent by the peer prior to the + arrival of the RST_STREAM. + + + + RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received + with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + + + RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM + frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + +
+ +
+
+ The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+ communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
+ also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
+ parameter can also be referred to as a "setting".
+
+ SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
+ which are used by the receiving peer. Different values for the same parameter can be
+ advertised by each peer. For example, a client might set a high initial flow control
+ window, whereas a server might set a lower value to conserve resources.
+
+ A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
+ sent at any other time by either endpoint over the lifetime of the connection.
+ Implementations MUST support all of the parameters defined by this specification.
+
+ Each parameter in a SETTINGS frame replaces any existing value for that parameter.
+ Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
+ frame does not need to maintain any state other than the current value of its
+ parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
+ a receiver.
+
+ SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
+ frame defines the following flag:
+
+ Bit 1 being set indicates that this frame acknowledges receipt and application of the
+ peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
+ be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
+ other than 0 MUST be treated as a connection
+ error of type FRAME_SIZE_ERROR. For more information, see Settings Synchronization.
+
+
+ SETTINGS frames always apply to a connection, never a single stream. The stream
+ identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
+ frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
+ with a connection error of type
+ PROTOCOL_ERROR.
+
+ The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
+ MUST be treated as a connection error of type
+ PROTOCOL_ERROR.
+
+ + The payload of a SETTINGS frame consists of zero or more parameters, each consisting of + an unsigned 16-bit setting identifier and an unsigned 32-bit value. + + +
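+
+   A minimal Go sketch of this layout (illustrative names): six octets per parameter,
+   big-endian, with anything else constituting a badly formed frame:
+
+     package frames
+
+     import (
+         "encoding/binary"
+         "errors"
+     )
+
+     type setting struct {
+         id  uint16
+         val uint32
+     }
+
+     // parseSettings splits a SETTINGS payload into parameters. A
+     // length that is not a multiple of 6 is a badly formed frame and
+     // therefore a connection error.
+     func parseSettings(payload []byte) ([]setting, error) {
+         if len(payload)%6 != 0 {
+             return nil, errors.New("malformed SETTINGS payload")
+         }
+         out := make([]setting, 0, len(payload)/6)
+         for ; len(payload) > 0; payload = payload[6:] {
+             out = append(out, setting{
+                 id:  binary.BigEndian.Uint16(payload[:2]),
+                 val: binary.BigEndian.Uint32(payload[2:6]),
+             })
+         }
+         return out, nil
+     }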
+ +
+
+ +
+
+ The following parameters are defined:
+
+ Allows the sender to inform the remote endpoint of the maximum size of the header
+ compression table used to decode header blocks, in octets. The encoder can select
+ any size equal to or less than this value by using signaling specific to the
+ header compression format inside a header block. The initial value is 4,096
+ octets.
+
+ This setting can be used to disable server
+ push. An endpoint MUST NOT send a PUSH_PROMISE frame if it
+ receives this parameter set to a value of 0. An endpoint that has both set this
+ parameter to 0 and had it acknowledged MUST treat the receipt of a
+ PUSH_PROMISE frame as a connection error of type
+ PROTOCOL_ERROR.
+
+ The initial value is 1, which indicates that server push is permitted. Any value
+ other than 0 or 1 MUST be treated as a connection error of type
+ PROTOCOL_ERROR.
+
+ Indicates the maximum number of concurrent streams that the sender will allow.
+ This limit is directional: it applies to the number of streams that the sender
+ permits the receiver to create. Initially there is no limit to this value. It is
+ recommended that this value be no smaller than 100, so as to not unnecessarily
+ limit parallelism.
+
+ A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
+ by endpoints. A zero value does prevent the creation of new streams; however, this
+ can also happen for any limit that is exhausted with active streams. Servers
+ SHOULD only set a zero value for short durations; if a server does not wish to
+ accept requests, closing the connection could be preferable.
+
+ Indicates the sender's initial window size (in octets) for stream level flow
+ control. The initial value is 2^16-1 (65,535) octets.
+
+ This setting affects the window size of all streams, including existing streams,
+ see .
+
+ Values above the maximum flow control window size of 2^31-1 MUST
+ be treated as a connection error of
+ type FLOW_CONTROL_ERROR.
+
+ Indicates the size of the largest frame payload that the sender is willing to
+ receive, in octets.
+
+ The initial value is 2^14 (16,384) octets. The value advertised by
+ an endpoint MUST be between this initial value and the maximum allowed frame size
+ (2^24-1 or 16,777,215 octets), inclusive. Values outside this range
+ MUST be treated as a connection error
+ of type PROTOCOL_ERROR.
+
+ This advisory setting informs a peer of the maximum size of header list that the
+ sender is prepared to accept, in octets. The value is based on the uncompressed
+ size of header fields, including the length of the name and value in octets plus
+ an overhead of 32 octets for each header field.
+
+ For any given request, a lower limit than what is advertised MAY be enforced. The
+ initial value of this setting is unlimited.
+
+
+ An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
+ MUST ignore that setting.
+
+ +
+ + Most values in SETTINGS benefit from or require an understanding of when the peer has + received and applied the changed parameter values. In order to provide + such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag + is not set MUST apply the updated parameters as soon as possible upon receipt. + + + The values in the SETTINGS frame MUST be processed in the order they appear, with no + other frame processing between values. Unsupported parameters MUST be ignored. Once + all values have been processed, the recipient MUST immediately emit a SETTINGS frame + with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender + of the altered parameters can rely on the setting having been applied. + + + If the sender of a SETTINGS frame does not receive an acknowledgement within a + reasonable amount of time, it MAY issue a connection error of type + SETTINGS_TIMEOUT. + +
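+
+   A sketch of the receiving side, assuming the vendored Framer's Setting type and
+   WriteSettingsAck helper (the apply callback is illustrative): process every value in
+   order, then acknowledge:
+
+     package settings
+
+     import "github.com/bradfitz/http2"
+
+     // applyAndAck applies the parameters of a non-ACK SETTINGS frame
+     // in order (apply should ignore unknown identifiers) and only
+     // then emits the acknowledgement the peer is waiting for.
+     func applyAndAck(fr *http2.Framer, params []http2.Setting, apply func(http2.Setting)) error {
+         for _, p := range params {
+             apply(p)
+         }
+         return fr.WriteSettingsAck()
+     }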
+
+ +
+ + The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of + streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned + 31-bit identifier of the stream the endpoint plans to create along with a set of headers + that provide additional context for the stream. contains a + thorough description of the use of PUSH_PROMISE frames. + + +
+ +
+ + The PUSH_PROMISE frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single reserved bit. + + + An unsigned 31-bit integer that identifies the stream that is reserved by the + PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next + stream sent by the sender (see new stream + identifier). + + + A header block fragment containing request header + fields. + + + Padding octets. + + + + + + The PUSH_PROMISE frame defines the following flags: + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any + other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + + + PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream + identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the + stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + Promised streams are not required to be used in the order they are promised. The + PUSH_PROMISE only reserves stream identifiers for later use. + + + + PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the + peer endpoint is set to 0. An endpoint that has set this setting and has received + acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a + RST_STREAM referencing the promised stream identifier back to the sender of + the PUSH_PROMISE. + + + + A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for + header compression. PUSH_PROMISE also reserves a stream for later use, causing the + promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a + stream unless that stream is either "open" or "half closed (remote)"; the sender MUST + ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST + be in the "idle" state). + + + Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream + state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a + stream that is neither "open" nor "half closed (local)" as a connection error of type + PROTOCOL_ERROR. However, an endpoint that has sent + RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that + might have been created before the RST_STREAM frame is received and + processed. + + + A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a + stream that is not currently in the "idle" state) as a connection error of type + PROTOCOL_ERROR. + + + + The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical + to those defined for DATA frames. + +
+ +
+ + The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the + sender, as well as determining whether an idle connection is still functional. PING + frames can be sent from any endpoint. + +
+ +
+ + + In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. + A sender can include any value it chooses and use those bytes in any fashion. + + + Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with + the ACK flag set in response, with an identical payload. PING responses SHOULD be given + higher priority than any other frame. + + + + The PING frame defines the following flags: + + + Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST + set this flag in PING responses. An endpoint MUST NOT respond to PING frames + containing this flag. + + + + + PING frames are not associated with any individual stream. If a PING frame is received + with a stream identifier field value other than 0x0, the recipient MUST respond with a + connection error of type + PROTOCOL_ERROR. + + + Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type + FRAME_SIZE_ERROR. + + +
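+
+   A sketch of a PING handler in Go, assuming the vendored Framer's PingFrame and
+   WritePing helpers; it answers probes and turns ACKs into round-trip measurements
+   (the sent map of outstanding probes is illustrative):
+
+     package ping
+
+     import (
+         "time"
+
+         "github.com/bradfitz/http2"
+     )
+
+     // onPing echoes a PING that lacks the ACK flag and treats one
+     // that carries it as the end of an RTT probe.
+     func onPing(fr *http2.Framer, f *http2.PingFrame, sent map[[8]byte]time.Time) (time.Duration, error) {
+         if f.Header().Flags.Has(http2.FlagPingAck) {
+             var rtt time.Duration
+             if t, ok := sent[f.Data]; ok {
+                 rtt = time.Since(t)
+                 delete(sent, f.Data)
+             }
+             return rtt, nil // MUST NOT respond to a PING carrying ACK
+         }
+         return 0, fr.WritePing(true, f.Data)
+     }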
+ +
+
+ The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
+ connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
+ will ignore frames sent on any new streams with identifiers higher than the included last
+ stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
+ connection, although a new connection can be established for new streams.
+
+ The purpose of this frame is to allow an endpoint to gracefully stop accepting new
+ streams, while still finishing processing of previously established streams. This enables
+ administrative actions, like server maintenance.
+
+ There is an inherent race condition between an endpoint starting new streams and the
+ remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
+ identifier of the last peer-initiated stream which was or might be processed on the
+ sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
+ the identified stream is the highest numbered stream initiated by the client.
+
+ If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
+ than what is indicated in the GOAWAY frame, those streams are not or will not be
+ processed. The receiver of the GOAWAY frame can treat the streams as though they had
+ never been created at all, thereby allowing those streams to be retried later on a new
+ connection.
+
+ Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
+ can know whether a stream has been partially processed or not. For example, if an HTTP
+ client sends a POST at the same time that a server closes a connection, the client cannot
+ know if the server started to process that POST request if the server does not send a
+ GOAWAY frame to indicate what streams it might have acted on.
+
+ An endpoint might choose to close a connection without sending GOAWAY for misbehaving
+ peers.
+
+ +
+
+ The GOAWAY frame does not define any flags.
+
+ The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
+ a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type
+ PROTOCOL_ERROR.
+
+ The last stream identifier in the GOAWAY frame contains the highest numbered stream
+ identifier on which the sender of the GOAWAY frame might have taken some action, or
+ might yet take action. All streams up to and including the identified stream might
+ have been processed in some way. The last stream identifier can be set to 0 if no streams
+ were processed.
+
+ In this context, "processed" means that some data from the stream was passed to some
+ higher layer of software that might have taken some action as a result.
+
+ If a connection terminates without a GOAWAY frame, the last stream identifier is
+ effectively the highest possible stream identifier.
+
+ On streams with lower or equal numbered identifiers that were not closed completely prior
+ to the connection being closed, re-attempting requests, transactions, or any protocol
+ activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
+ DELETE. Any protocol activity that uses higher numbered streams can be safely retried
+ using a new connection.
+
+ Activity on streams numbered lower or equal to the last stream identifier might still
+ complete successfully. The sender of a GOAWAY frame might gracefully shut down a
+ connection by sending a GOAWAY frame, maintaining the connection in an open state until
+ all in-progress streams complete.
+
+ An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
+ endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could
+ subsequently encounter a condition that requires immediate termination of the connection.
+ The last stream identifier from the last GOAWAY frame received indicates which streams
+ could have been acted upon. Endpoints MUST NOT increase the value they send in the last
+ stream identifier, since the peers might already have retried unprocessed requests on
+ another connection.
+
+ A client that is unable to retry requests loses all requests that are in flight when the
+ server closes the connection. This is especially true for intermediaries that might
+ not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
+ a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
+ 2^31-1 and a NO_ERROR code. This signals to the client that
+ a shutdown is imminent and that no further requests can be initiated. After waiting at
+ least one round trip time, the server can send another GOAWAY frame with an updated last
+ stream identifier. This ensures that a connection can be cleanly shut down without losing
+ requests.
+
+ After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
+ higher than the identified last stream. However, any frames that alter connection state
+ cannot be completely ignored. For instance, HEADERS,
+ PUSH_PROMISE and CONTINUATION frames MUST be minimally
+ processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow
+ control window. Failure to process these frames can cause flow control or header
+ compression state to become unsynchronized.
+ + + + The GOAWAY frame also contains a 32-bit error code that + contains the reason for closing the connection. + + + Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug + data is intended for diagnostic purposes only and carries no semantic value. Debug + information could contain security- or privacy-sensitive data. Logged or otherwise + persistently stored debug data MUST have adequate safeguards to prevent unauthorized + access. + +
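+
+   The two-step graceful shutdown described above might look like this in Go, assuming
+   the vendored Framer's WriteGoAway helper and ErrCodeNo constant (maxClientStream and
+   rtt come from the caller):
+
+     package shutdown
+
+     import (
+         "time"
+
+         "github.com/bradfitz/http2"
+     )
+
+     // gracefulGoAway warns with a last stream identifier of 2^31-1,
+     // waits at least one round trip for in-flight requests to arrive,
+     // then sends the real last stream identifier.
+     func gracefulGoAway(fr *http2.Framer, maxClientStream uint32, rtt time.Duration) error {
+         if err := fr.WriteGoAway(1<<31-1, http2.ErrCodeNo, nil); err != nil {
+             return err
+         }
+         time.Sleep(rtt)
+         return fr.WriteGoAway(maxClientStream, http2.ErrCodeNo, nil)
+     }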
+ +
+ + The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. + + + Flow control operates at two levels: on each individual stream and on the entire + connection. + + + Both types of flow control are hop-by-hop; that is, only between the two endpoints. + Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. + However, throttling of data transfer by any receiver can indirectly cause the propagation + of flow control information toward the original sender. + + + Flow control only applies to frames that are identified as being subject to flow control. + Of the frame types defined in this document, this includes only DATA frames. + Frames that are exempt from flow control MUST be accepted and processed, unless the + receiver is unable to assign resources to handling the frame. A receiver MAY respond with + a stream error or connection error of type + FLOW_CONTROL_ERROR if it is unable to accept a frame. + +
+ +
+
+ The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
+ indicating the number of octets that the sender can transmit in addition to the existing
+ flow control window. The legal range for the increment to the flow control window is 1 to
+ 2^31-1 (0x7fffffff) octets.
+
+ The WINDOW_UPDATE frame does not define any flags.
+
+ The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
+ former case, the frame's stream identifier indicates the affected stream; in the latter,
+ the value "0" indicates that the entire connection is the subject of the frame.
+
+ A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
+ increment of 0 as a stream error of type
+ PROTOCOL_ERROR; errors on the connection flow control window MUST be
+ treated as a connection error.
+
+ WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+ This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
+ or "closed" stream. A receiver MUST NOT treat this as an error, see .
+
+ A receiver that receives a flow controlled frame MUST always account for its contribution
+ against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the
+ frame is in error. Since the sender counts the frame toward the flow control window, if
+ the receiver does not, the flow control window at sender and receiver can become
+ different.
+
+
+ Flow control in HTTP/2 is implemented using a window kept by each sender on every
+ stream. The flow control window is a simple integer value that indicates how many octets
+ of data the sender is permitted to transmit; as such, its size is a measure of the
+ buffering capacity of the receiver.
+
+ Two flow control windows are applicable: the stream flow control window and the
+ connection flow control window. The sender MUST NOT send a flow controlled frame with a
+ length that exceeds the space available in either of the flow control windows advertised
+ by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
+ empty DATA frame) MAY be sent if there is no available space in either
+ flow control window.
+
+ For flow control calculations, the 9-octet frame header is not counted.
+
+ After sending a flow controlled frame, the sender reduces the space available in both
+ windows by the length of the transmitted frame.
+
+ The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
+ space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
+ and connection level flow control windows.
+
+ A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
+ amount specified in the frame.
+
+ A sender MUST NOT allow a flow control window to exceed 2^31-1 octets.
+ If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
+ maximum it MUST terminate either the stream or the connection, as appropriate. For
+ streams, the sender sends a RST_STREAM with an error code of
+ FLOW_CONTROL_ERROR; for the connection, a GOAWAY
+ frame with a FLOW_CONTROL_ERROR code is sent.
+
+ Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
+ completely asynchronous with respect to each other. This property allows a receiver to
+ aggressively update the window size kept by the sender to prevent streams from stalling.
+
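+
+   The double-window bookkeeping is small enough to sketch directly in Go (illustrative
+   types; a real sender also needs locking):
+
+     package flow
+
+     // window is a sender-side flow control window.
+     type window struct{ avail int32 }
+
+     // canSend reports whether n octets of flow controlled payload fit
+     // in both windows; the 9-octet frame header is not counted.
+     func canSend(stream, conn *window, n int32) bool {
+         return n <= stream.avail && n <= conn.avail
+     }
+
+     // sent reduces both windows after a flow controlled frame goes out.
+     func sent(stream, conn *window, n int32) {
+         stream.avail -= n
+         conn.avail -= n
+     }
+
+     // update applies a WINDOW_UPDATE increment. A false result means
+     // the window would exceed 2^31-1 and the stream (RST_STREAM) or
+     // connection (GOAWAY) must end with FLOW_CONTROL_ERROR.
+     func (w *window) update(incr uint32) bool {
+         if int64(w.avail)+int64(incr) > 1<<31-1 {
+             return false
+         }
+         w.avail += int32(incr)
+         return true
+     }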
+ +
+ + When an HTTP/2 connection is first established, new streams are created with an initial + flow control window size of 65,535 octets. The connection flow control window is 65,535 + octets. Both endpoints can adjust the initial window size for new streams by including + a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS + frame that forms part of the connection preface. The connection flow control window can + only be changed using WINDOW_UPDATE frames. + + + Prior to receiving a SETTINGS frame that sets a value for + SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default + initial window size when sending flow controlled frames. Similarly, the connection flow + control window is set to the default initial window size until a WINDOW_UPDATE frame is + received. + + + A SETTINGS frame can alter the initial flow control window size for all + current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, + a receiver MUST adjust the size of all stream flow control windows that it maintains by + the difference between the new value and the old value. + + + A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in + a flow control window to become negative. A sender MUST track the negative flow control + window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE + frames that cause the flow control window to become positive. + + + For example, if the client sends 60KB immediately on connection establishment, and the + server sets the initial window size to be 16KB, the client will recalculate the + available flow control window to be -44KB on receipt of the SETTINGS + frame. The client retains a negative flow control window until WINDOW_UPDATE frames + restore the window to being positive, after which the client can resume sending. + + + A SETTINGS frame cannot alter the connection flow control window. + + + An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that + causes any flow control window to exceed the maximum size as a connection error of type + FLOW_CONTROL_ERROR. + +
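+
+   Applying a SETTINGS_INITIAL_WINDOW_SIZE change is one signed adjustment per stream,
+   as in this illustrative Go sketch:
+
+     package flow
+
+     // adjustInitialWindow shifts every stream window by the difference
+     // between the new and old initial sizes. The result can legally be
+     // negative, as in the 16KB - 60KB = -44KB example above; the
+     // sender then waits for WINDOW_UPDATE frames before sending more.
+     func adjustInitialWindow(windows map[uint32]*int32, oldSize, newSize uint32) {
+         delta := int32(newSize) - int32(oldSize)
+         for _, avail := range windows {
+             *avail += delta
+         }
+     }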
+ +
+ + A receiver that wishes to use a smaller flow control window than the current size can + send a new SETTINGS frame. However, the receiver MUST be prepared to + receive data that exceeds this window size, since the sender might send data that + exceeds the lower limit prior to processing the SETTINGS frame. + + + After sending a SETTINGS frame that reduces the initial flow control window size, a + receiver has two options for handling streams that exceed flow control limits: + + + The receiver can immediately send RST_STREAM with + FLOW_CONTROL_ERROR error code for the affected streams. + + + The receiver can accept the streams and tolerate the resulting head of line + blocking, sending WINDOW_UPDATE frames as it consumes data. + + + +
+
+ +
+ + The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can + be sent on an existing stream, as long as the preceding frame is on the same stream and is + a HEADERS, PUSH_PROMISE or CONTINUATION frame without the + END_HEADERS flag set. + + +
+ +
+ + The CONTINUATION frame payload contains a header block + fragment. + + + + The CONTINUATION frame defines the following flag: + + + + Bit 3 being set indicates that this frame ends a header + block. + + + If the END_HEADERS bit is not set, this frame MUST be followed by another + CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or + a frame on a different stream as a connection + error of type PROTOCOL_ERROR. + + + + + + + The CONTINUATION frame changes the connection state as defined in . + + + + CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received + whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. + + + + A CONTINUATION frame MUST be preceded by a HEADERS, + PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A + recipient that observes violation of this rule MUST respond with a connection error of type + PROTOCOL_ERROR. + +
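+
+   The contiguity rule for header blocks amounts to a one-field state machine. An
+   illustrative Go sketch, using the frame type codes defined above:
+
+     package frames
+
+     // headerBlockGuard tracks whether a header block is open: after a
+     // HEADERS or PUSH_PROMISE frame without END_HEADERS, the only
+     // legal next frame is a CONTINUATION on the same stream.
+     type headerBlockGuard struct {
+         open     bool
+         streamID uint32
+     }
+
+     // check returns false on a violation, which is a connection error
+     // of type PROTOCOL_ERROR.
+     func (g *headerBlockGuard) check(ftype uint8, streamID uint32, endHeaders bool) bool {
+         const (
+             typeHeaders      = 0x1
+             typePushPromise  = 0x5
+             typeContinuation = 0x9
+         )
+         if g.open {
+             if ftype != typeContinuation || streamID != g.streamID {
+                 return false
+             }
+             g.open = !endHeaders
+             return true
+         }
+         switch ftype {
+         case typeContinuation:
+             return false // no preceding frame left the block open
+         case typeHeaders, typePushPromise:
+             g.open, g.streamID = !endHeaders, streamID
+         }
+         return true
+     }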
+
+ +
+
+ Error codes are 32-bit fields that are used in RST_STREAM and
+ GOAWAY frames to convey the reasons for the stream or connection error.
+
+ Error codes share a common code space. Some error codes apply only to either streams or the
+ entire connection and have no defined semantics in the other context.
+
+ The following error codes are defined:
+
+ The associated condition is not a result of an error. For example, a
+ GOAWAY might include this code to indicate graceful shutdown of a
+ connection.
+
+ The endpoint detected an unspecific protocol error. This error is for use when a more
+ specific error code is not available.
+
+ The endpoint encountered an unexpected internal error.
+
+ The endpoint detected that its peer violated the flow control protocol.
+
+ The endpoint sent a SETTINGS frame, but did not receive a response in a
+ timely manner. See Settings Synchronization.
+
+ The endpoint received a frame after a stream was half closed.
+
+ The endpoint received a frame with an invalid size.
+
+ The endpoint refuses the stream prior to performing any application processing, see
+ for details.
+
+ Used by the endpoint to indicate that the stream is no longer needed.
+
+ The endpoint is unable to maintain the header compression context for the connection.
+
+ The connection established in response to a CONNECT
+ request was reset or abnormally closed.
+
+ The endpoint detected that its peer is exhibiting a behavior that might be generating
+ excessive load.
+
+ The underlying transport has properties that do not meet minimum security
+ requirements (see ).
+
+
+ Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
+ treated by an implementation as being equivalent to INTERNAL_ERROR.
+
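+
+   For reference, the thirteen codes above in Go form; the names and 32-bit values match
+   the ErrCode constants defined by the vendored package:
+
+     package errcodes
+
+     type ErrCode uint32
+
+     const (
+         NoError            ErrCode = 0x0
+         ProtocolError      ErrCode = 0x1
+         InternalError      ErrCode = 0x2
+         FlowControlError   ErrCode = 0x3
+         SettingsTimeout    ErrCode = 0x4
+         StreamClosed       ErrCode = 0x5
+         FrameSizeError     ErrCode = 0x6
+         RefusedStream      ErrCode = 0x7
+         Cancel             ErrCode = 0x8
+         CompressionError   ErrCode = 0x9
+         ConnectError       ErrCode = 0xa
+         EnhanceYourCalm    ErrCode = 0xb
+         InadequateSecurity ErrCode = 0xc
+     )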
+ +
+
+ HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
+ that, from the application perspective, the features of the protocol are largely
+ unchanged. To achieve this, all request and response semantics are preserved, although the
+ syntax of conveying those semantics has changed.
+
+ Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax
+ and Routing , such as the HTTP and HTTPS URI schemes, are also
+ applicable in HTTP/2, but the expression of those semantics for this protocol is defined
+ in the sections below.
+
+ + A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on + the same stream as the request. + + + An HTTP message (request or response) consists of: + + + for a response only, zero or more HEADERS frames (each followed by zero + or more CONTINUATION frames) containing the message headers of + informational (1xx) HTTP responses (see and ), + and + + + one HEADERS frame (followed by zero or more CONTINUATION + frames) containing the message headers (see ), and + + + zero or more DATA frames containing the message payload (see ), and + + + optionally, one HEADERS frame, followed by zero or more + CONTINUATION frames containing the trailer-part, if present (see ). + + + The last frame in the sequence bears an END_STREAM flag, noting that a + HEADERS frame bearing the END_STREAM flag can be followed by + CONTINUATION frames that carry any remaining portions of the header block. + + + Other frames (from any stream) MUST NOT occur between either HEADERS frame + and any CONTINUATION frames that might follow. + + + + Trailing header fields are carried in a header block that also terminates the stream. + That is, a sequence starting with a HEADERS frame, followed by zero or more + CONTINUATION frames, where the HEADERS frame bears an + END_STREAM flag. Header blocks after the first that do not terminate the stream are not + part of an HTTP request or response. + + + A HEADERS frame (and associated CONTINUATION frames) can + only appear at the start or end of a stream. An endpoint that receives a + HEADERS frame without the END_STREAM flag set after receiving a final + (non-informational) status code MUST treat the corresponding request or response as malformed. + + + + An HTTP request/response exchange fully consumes a single stream. A request starts with + the HEADERS frame that puts the stream into an "open" state. The request + ends with a frame bearing END_STREAM, which causes the stream to become "half closed + (local)" for the client and "half closed (remote)" for the server. A response starts with + a HEADERS frame and ends with a frame bearing END_STREAM, which places the + stream in the "closed" state. + + + +
+ + HTTP/2 removes support for the 101 (Switching Protocols) informational status code + (). + + + The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. + Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate + their use (see ). + +
+ +
+ + HTTP header fields carry information as a series of key-value pairs. For a listing of + registered HTTP headers, see the Message Header Field Registry maintained at . + + +
+
+ While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the
+ status code for the response, HTTP/2 uses special pseudo-header fields beginning with
+ the ':' character (ASCII 0x3a) for this purpose.
+
+ Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
+ pseudo-header fields other than those defined in this document.
+
+ Pseudo-header fields are only valid in the context in which they are defined.
+ Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
+ fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST
+ NOT appear in trailers. Endpoints MUST treat a request or response that contains
+ undefined or invalid pseudo-header fields as malformed.
+
+ Just as in HTTP/1.x, header field names are strings of ASCII characters that are
+ compared in a case-insensitive fashion. However, header field names MUST be converted
+ to lowercase prior to their encoding in HTTP/2. A request or response containing
+ uppercase header field names MUST be treated as malformed.
+
+ All pseudo-header fields MUST appear in the header block before regular header fields.
+ Any request or response that contains a pseudo-header field that appears in a header
+ block after a regular header field MUST be treated as malformed.
+
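+
+   These ordering and casing rules can be checked in a single pass over header field
+   names in wire order, as in this illustrative Go sketch:
+
+     package headers
+
+     import "strings"
+
+     // validFieldOrder reports whether names are lowercase and every
+     // pseudo-header field precedes all regular header fields.
+     func validFieldOrder(names []string) bool {
+         seenRegular := false
+         for _, name := range names {
+             if name == "" || name != strings.ToLower(name) {
+                 return false // uppercase names are malformed
+             }
+             if strings.HasPrefix(name, ":") {
+                 if seenRegular {
+                     return false // pseudo-header after a regular field
+                 }
+             } else {
+                 seenRegular = true
+             }
+         }
+         return true
+     }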
+ +
+
+ HTTP/2 does not use the Connection header field to
+ indicate connection-specific header fields; in this protocol, connection-specific
+ metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message
+ containing connection-specific header fields; any message containing
+ connection-specific header fields MUST be treated as malformed.
+
+ This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
+ to remove any header fields nominated by the Connection header field, along with the
+ Connection header field itself. Such intermediaries SHOULD also remove other
+ connection-specific header fields, such as Keep-Alive, Proxy-Connection,
+ Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
+
+ One exception to this is the TE header field, which MAY be present in an HTTP/2
+ request; when it is, it MUST NOT contain any value other than "trailers".
+
+
+ HTTP/2 purposefully does not support upgrade to another protocol. The handshake
+ methods described in are believed sufficient to
+ negotiate the use of alternative protocols.
+
+
+ +
+ + The following pseudo-header fields are defined for HTTP/2 requests: + + + + The :method pseudo-header field includes the HTTP + method (). + + + + + The :scheme pseudo-header field includes the scheme + portion of the target URI (). + + + :scheme is not restricted to http and https schemed URIs. A + proxy or gateway can translate requests for non-HTTP schemes, enabling the use + of HTTP to interact with non-HTTP services. + + + + + The :authority pseudo-header field includes the + authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http + or https schemed URIs. + + + To ensure that the HTTP/1.1 request line can be reproduced accurately, this + pseudo-header field MUST be omitted when translating from an HTTP/1.1 request + that has a request target in origin or asterisk form (see ). Clients that generate + HTTP/2 requests directly SHOULD use the :authority pseudo-header + field instead of the Host header field. An + intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by + copying the value of the :authority pseudo-header + field. + + + + + The :path pseudo-header field includes the path and + query parts of the target URI (the path-absolute + production from and optionally a '?' character + followed by the query production, see and ). A request in asterisk form includes the value '*' for the + :path pseudo-header field. + + + This pseudo-header field MUST NOT be empty for http + or https URIs; http or + https URIs that do not contain a path component + MUST include a value of '/'. The exception to this rule is an OPTIONS request + for an http or https + URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). + + + + + + All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory + pseudo-header fields is malformed. + + + HTTP/2 does not define a way to carry the version identifier that is included in the + HTTP/1.1 request line. + +
+ +
+ + For HTTP/2 responses, a single :status pseudo-header + field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all + responses, otherwise the response is malformed. + + + HTTP/2 does not define a way to carry the version or reason phrase that is included in + an HTTP/1.1 status line. + +
+ +
+ + The Cookie header field can carry a significant amount of + redundant data. + + + The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). + This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from + being separated into different name-value pairs. This can significantly reduce + compression efficiency as individual cookie-pairs are updated. + + + To allow for better compression efficiency, the Cookie header field MAY be split into + separate header fields, each with one or more cookie-pairs. If there are multiple + Cookie header fields after decompression, these MUST be concatenated into a single + octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") + before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a + generic HTTP server application. + +
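+
+   Both directions of the transformation, as an illustrative Go sketch:
+
+     package cookies
+
+     import "strings"
+
+     // split breaks "a=b; c=d; e=f" into one field per cookie-pair so
+     // each pair can be indexed independently by header compression.
+     func split(cookie string) []string { return strings.Split(cookie, "; ") }
+
+     // join restores a single field, using the 0x3B 0x20 ("; ")
+     // delimiter, before the message leaves an HTTP/2 context.
+     func join(crumbs []string) string { return strings.Join(crumbs, "; ") }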
+ + Therefore, the following two lists of Cookie header fields are semantically + equivalent. + + +
+
+   cookie: a=b; c=d; e=f
+
+   cookie: a=b
+   cookie: c=d
+   cookie: e=f
+
+ +
+
+ A malformed request or response is one that is an otherwise valid sequence of HTTP/2
+ frames but is invalid due to the presence of extraneous frames, prohibited
+ header fields, the absence of mandatory header fields, or the inclusion of uppercase
+ header field names.
+
+ A request or response that includes an entity body can include a content-length header field. A request or response is also
+ malformed if the value of a content-length header field
+ does not equal the sum of the DATA frame payload lengths that form the
+ body. A response that is defined to have no payload, as described in , can have a non-zero
+ content-length header field, even though no content is
+ included in DATA frames.
+
+ Intermediaries that process HTTP requests or responses (i.e., any intermediary not
+ acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
+ requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR.
+
+ For malformed requests, a server MAY send an HTTP response prior to closing or
+ resetting the stream. Clients MUST NOT accept a malformed response. Note that these
+ requirements are intended to protect against several types of common attacks against
+ HTTP; they are deliberately strict, because being permissive can expose
+ implementations to these vulnerabilities.
+
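+
+   The content-length consistency rule, as an illustrative Go sketch (dataLens holds
+   the number of body octets carried by each DATA frame):
+
+     package checks
+
+     // bodyLengthOK reports whether the DATA octets forming the body
+     // add up to the declared content-length; a mismatch makes the
+     // request or response malformed.
+     func bodyLengthOK(declared int64, dataLens []int) bool {
+         var sum int64
+         for _, n := range dataLens {
+             sum += int64(n)
+         }
+         return sum == declared
+     }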
+
+ +
+ + This section shows HTTP/1.1 requests and responses, with illustrations of equivalent + HTTP/2 requests and responses. + + + An HTTP GET request includes request header fields and no body and is therefore + transmitted as a single HEADERS frame, followed by zero or more + CONTINUATION frames containing the serialized block of request header + fields. The HEADERS frame in the following has both the END_HEADERS and + END_STREAM flags set; no CONTINUATION frames are sent: + + +
+
+  GET /resource HTTP/1.1           HEADERS
+  Host: example.org          ==>     + END_STREAM
+  Accept: image/jpeg                 + END_HEADERS
+                                       :method = GET
+                                       :scheme = https
+                                       :path = /resource
+                                       host = example.org
+                                       accept = image/jpeg
+]]>
+ + + Similarly, a response that includes only response header fields is transmitted as a + HEADERS frame (again, followed by zero or more + CONTINUATION frames) containing the serialized block of response header + fields. + + +
+
+  HTTP/1.1 304 Not Modified        HEADERS
+  ETag: "xyzzy"              ==>     + END_STREAM
+  Expires: Thu, 23 Jan ...           + END_HEADERS
+                                       :status = 304
+                                       etag = "xyzzy"
+                                       expires = Thu, 23 Jan ...
+]]>
+ + + An HTTP POST request that includes request header fields and payload data is transmitted + as one HEADERS frame, followed by zero or more + CONTINUATION frames containing the request header fields, followed by one + or more DATA frames, with the last CONTINUATION (or + HEADERS) frame having the END_HEADERS flag set and the final + DATA frame having the END_STREAM flag set: + + +
+  POST /resource HTTP/1.1          HEADERS
+  Host: example.org          ==>     - END_STREAM
+  Content-Type: image/jpeg           - END_HEADERS
+  Content-Length: 123                  :method = POST
+                                       :path = /resource
+  {binary data}                        :scheme = https
+
+                                   CONTINUATION
+                                     + END_HEADERS
+                                       content-type = image/jpeg
+                                       host = example.org
+                                       content-length = 123
+
+                                   DATA
+                                     + END_STREAM
+                                   {binary data}
+]]>
+
+  Note that data contributing to any given header field could be spread between header
+  block fragments. The allocation of header fields to frames in this example is
+  illustrative only.
+
+ + + A response that includes header fields and payload data is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames, followed by one or more DATA frames, with the last + DATA frame in the sequence having the END_STREAM flag set: + + +
+  HTTP/1.1 200 OK                  HEADERS
+  Content-Type: image/jpeg   ==>     - END_STREAM
+  Content-Length: 123                + END_HEADERS
+                                       :status = 200
+  {binary data}                        content-type = image/jpeg
+                                       content-length = 123
+
+                                   DATA
+                                     + END_STREAM
+                                   {binary data}
+]]>
+ + + Trailing header fields are sent as a header block after both the request or response + header block and all the DATA frames have been sent. The + HEADERS frame starting the trailers header block has the END_STREAM flag + set. + + +
+  HTTP/1.1 200 OK                  HEADERS
+  Content-Type: image/jpeg   ==>     - END_STREAM
+  Transfer-Encoding: chunked         + END_HEADERS
+  Trailer: Foo                         :status = 200
+                                       content-length = 123
+  123                                  content-type = image/jpeg
+  {binary data}                        trailer = Foo
+  0
+  Foo: bar                         DATA
+                                     - END_STREAM
+                                     {binary data}
+
+                                   HEADERS
+                                     + END_STREAM
+                                     + END_HEADERS
+                                       foo = bar
+]]>
+ + +
+
+  An informational response using a 1xx status code other than 101 is transmitted as a
+  HEADERS frame, followed by zero or more CONTINUATION frames:
+
+  HTTP/1.1 103 BAR                 HEADERS
+  Extension-Field: bar       ==>     - END_STREAM
+                                     + END_HEADERS
+                                       :status = 103
+                                       extension-field = bar
+]]>
+
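+
+  The GET example above maps directly onto the hpack encoder vendored in this
+  directory (the same API that transport.go's encodeHeaders uses later in this
+  diff). A minimal sketch of producing the header block fragment for the HEADERS
+  frame:
+
+  package main
+
+  import (
+      "bytes"
+      "fmt"
+
+      "github.com/bradfitz/http2/hpack"
+  )
+
+  func main() {
+      var buf bytes.Buffer
+      enc := hpack.NewEncoder(&buf)
+      // Pseudo-header fields first, then regular fields, all lowercase.
+      for _, f := range []hpack.HeaderField{
+          {Name: ":method", Value: "GET"},
+          {Name: ":scheme", Value: "https"},
+          {Name: ":path", Value: "/resource"},
+          {Name: "host", Value: "example.org"},
+          {Name: "accept", Value: "image/jpeg"},
+      } {
+          enc.WriteField(f)
+      }
+      // buf.Bytes() is the header block fragment carried in a HEADERS
+      // frame with END_HEADERS and END_STREAM (no body) set.
+      fmt.Printf("%d-byte header block fragment\n", buf.Len())
+  }
+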
+ +
+
+  In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an
+  error occurs, because there is no means to determine the nature of the error. It is
+  possible that some server processing occurred prior to the error, which could result
+  in undesirable effects if the request were reattempted.
+
+  HTTP/2 provides two mechanisms that give a client a guarantee that a request has not
+  been processed:
+
+  The GOAWAY frame indicates the highest stream number that might have been processed.
+  Requests on streams with higher numbers are therefore guaranteed to be safe to retry
+  (see the sketch after this section).
+
+  The REFUSED_STREAM error code can be included in a RST_STREAM frame to indicate that
+  the stream is being closed prior to any processing having occurred. Any request that
+  was sent on the reset stream can be safely retried.
+
+  Requests that have not been processed have not failed; clients MAY automatically
+  retry them, even those with non-idempotent methods.
+
+  A server MUST NOT indicate that a stream has not been processed unless it can
+  guarantee that fact. If frames that are on a stream are passed to the application
+  layer for any stream, then REFUSED_STREAM MUST NOT be used for that stream, and a
+  GOAWAY frame MUST include a stream identifier that is greater than or equal to the
+  given stream identifier.
+
+  In addition to these mechanisms, the PING frame provides a way for a client to
+  easily test a connection. Connections that remain idle can become broken as some
+  middleboxes (for instance, network address translators, or load balancers) silently
+  discard connection bindings. The PING frame allows a client to safely test whether
+  a connection is still active without sending a request.
+
+
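+
+  The first of the two mechanisms above turns into a one-line predicate. A hedged
+  Go sketch (names hypothetical) of the retry decision a client can make when a
+  connection dies after a GOAWAY:
+
+  package main
+
+  import "fmt"
+
+  // safeToRetry reports whether a request sent on streamID is
+  // guaranteed unprocessed after a GOAWAY carrying lastStreamID:
+  // streams numbered above the advertised last stream ID were never
+  // acted on and can be retried, even with non-idempotent methods.
+  func safeToRetry(streamID, lastStreamID uint32) bool {
+      return streamID > lastStreamID
+  }
+
+  func main() {
+      fmt.Println(safeToRetry(7, 5)) // true: never processed
+      fmt.Println(safeToRetry(3, 5)) // false: may have been processed
+  }
+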
+ +
+
+  HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
+  corresponding "promised" requests) to a client in association with a previous
+  client-initiated request. This can be useful when the server knows the client will
+  need to have those responses available in order to fully process the response to the
+  original request.
+
+  Pushing additional message exchanges in this fashion is optional, and is negotiated
+  between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set to 0 to
+  indicate that server push is disabled.
+
+  Promised requests MUST be cacheable (see ), MUST be safe (see ), and MUST NOT include
+  a request body. Clients that receive a promised request that is not cacheable,
+  unsafe, or that includes a request body MUST reset the stream with a stream error of
+  type PROTOCOL_ERROR.
+
+  Pushed responses that are cacheable (see ) can be stored by the client, if it
+  implements an HTTP cache. Pushed responses are considered successfully validated on
+  the origin server (e.g., if the "no-cache" cache response directive is present) while
+  the stream identified by the promised stream ID is still open.
+
+  Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They
+  MAY be made available to the application separately.
+
+  An intermediary can receive pushes from the server and choose not to forward them on
+  to the client. In other words, how to make use of the pushed information is up to
+  that intermediary. Equally, the intermediary might choose to make additional pushes
+  to the client, without any action taken by the server.
+
+  A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE frame
+  as a connection error of type PROTOCOL_ERROR. Clients MUST reject any attempt to
+  change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating the
+  message as a connection error of type PROTOCOL_ERROR.
+
+ + Server push is semantically equivalent to a server responding to a request; however, in + this case that request is also sent by the server, as a PUSH_PROMISE + frame. + + + The PUSH_PROMISE frame includes a header block that contains a complete + set of request header fields that the server attributes to the request. It is not + possible to push a response to a request that includes a request body. + + + + Pushed responses are always associated with an explicit request from the client. The + PUSH_PROMISE frames sent by the server are sent on that explicit + request's stream. The PUSH_PROMISE frame also includes a promised stream + identifier, chosen from the stream identifiers available to the server (see ). + + + + The header fields in PUSH_PROMISE and any subsequent + CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in + the :method header field that is safe and cacheable. If a + client receives a PUSH_PROMISE that does not include a complete and valid + set of header fields, or the :method header field identifies + a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. + + + + The server SHOULD send PUSH_PROMISE () + frames prior to sending any frames that reference the promised responses. This avoids a + race where clients issue requests prior to receiving any PUSH_PROMISE + frames. + + + For example, if the server receives a request for a document containing embedded links + to multiple image files, and the server chooses to push those additional images to the + client, sending push promises before the DATA frames that contain the + image links ensures that the client is able to see the promises before discovering + embedded links. Similarly, if the server pushes responses referenced by the header block + (for instance, in Link header fields), sending the push promises before sending the + header block ensures that clients do not request them. + + + + PUSH_PROMISE frames MUST NOT be sent by the client. + + + PUSH_PROMISE frames can be sent by the server in response to any + client-initiated stream, but the stream MUST be in either the "open" or "half closed + (remote)" state with respect to the server. PUSH_PROMISE frames are + interspersed with the frames that comprise a response, though they cannot be + interspersed with HEADERS and CONTINUATION frames that + comprise a single header block. + + + Sending a PUSH_PROMISE frame creates a new stream and puts the stream + into the “reserved (local)” state for the server and the “reserved (remote)” state for + the client. + +
+ +
+ + After sending the PUSH_PROMISE frame, the server can begin delivering the + pushed response as a response on a server-initiated + stream that uses the promised stream identifier. The server uses this stream to + transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" + to the client after the initial HEADERS frame is sent. + + + + Once a client receives a PUSH_PROMISE frame and chooses to accept the + pushed response, the client SHOULD NOT issue any requests for the promised response + until after the promised stream has closed. + + + + If the client determines, for any reason, that it does not wish to receive the pushed + response from the server, or if the server takes too long to begin sending the promised + response, the client can send an RST_STREAM frame, using either the + CANCEL or REFUSED_STREAM codes, and referencing the pushed + stream's identifier. + + + A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the + number of responses that can be concurrently pushed by a server. Advertising a + SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by + preventing the server from creating the necessary streams. This does not prohibit a + server from sending PUSH_PROMISE frames; clients need to reset any + promised streams that are not wanted. + + + + Clients receiving a pushed response MUST validate that either the server is + authoritative (see ), or the proxy that provided the pushed + response is configured for the corresponding request. For example, a server that offers + a certificate for only the example.com DNS-ID or Common Name + is not permitted to push a response for https://www.example.org/doc. + + + The response for a PUSH_PROMISE stream begins with a + HEADERS frame, which immediately puts the stream into the “half closed + (remote)” state for the server and “half closed (local)” state for the client, and ends + with a frame bearing END_STREAM, which places the stream in the "closed" state. + + + The client never sends a frame with the END_STREAM flag for a server push. + + + +
+ +
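+
+  On the client side, the two controls described above (the SETTINGS_ENABLE_PUSH
+  setting and resetting unwanted promised streams) look roughly like this with the
+  vendored framer; a sketch, not a complete client (the promised stream ID 2 is
+  made up, and errors are unchecked):
+
+  package main
+
+  import (
+      "bytes"
+
+      "github.com/bradfitz/http2"
+  )
+
+  func main() {
+      var buf bytes.Buffer
+      fr := http2.NewFramer(&buf, &buf)
+
+      // Disable server push in the initial SETTINGS frame.
+      fr.WriteSettings(http2.Setting{ID: http2.SettingEnablePush, Val: 0})
+
+      // If a PUSH_PROMISE arrives anyway, reject the promised stream
+      // with CANCEL (or REFUSED_STREAM if nothing was processed).
+      fr.WriteRSTStream(2, http2.ErrCodeCancel)
+  }
+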
+ +
+ + In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. + CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin + server for the purposes of interacting with https resources. + + + In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to + a remote host, for similar purposes. The HTTP header field mapping works as defined in + Request Header Fields, with a few + differences. Specifically: + + + The :method header field is set to CONNECT. + + + The :scheme and :path header + fields MUST be omitted. + + + The :authority header field contains the host and port to + connect to (equivalent to the authority-form of the request-target of CONNECT + requests, see ). + + + + + A proxy that supports CONNECT establishes a TCP connection to + the server identified in the :authority header field. Once + this connection is successfully established, the proxy sends a HEADERS + frame containing a 2xx series status code to the client, as defined in . + + + After the initial HEADERS frame sent by each peer, all subsequent + DATA frames correspond to data sent on the TCP connection. The payload of + any DATA frames sent by the client is transmitted by the proxy to the TCP + server; data received from the TCP server is assembled into DATA frames by + the proxy. Frame types other than DATA or stream management frames + (RST_STREAM, WINDOW_UPDATE, and PRIORITY) + MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. + + + The TCP connection can be closed by either peer. The END_STREAM flag on a + DATA frame is treated as being equivalent to the TCP FIN bit. A client is + expected to send a DATA frame with the END_STREAM flag set after receiving + a frame bearing the END_STREAM flag. A proxy that receives a DATA frame + with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP + segment. A proxy that receives a TCP segment with the FIN bit set sends a + DATA frame with the END_STREAM flag set. Note that the final TCP segment + or DATA frame could be empty. + + + A TCP connection error is signaled with RST_STREAM. A proxy treats any + error in the TCP connection, which includes receiving a TCP segment with the RST bit set, + as a stream error of type + CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the + RST bit set if it detects an error with the stream or the HTTP/2 connection. + +
+
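+
+  The DATA/END_STREAM-to-TCP-FIN mapping above is essentially two copy loops. A
+  hedged Go sketch of the proxy side, where stream is a stand-in type: its Read
+  returns DATA payloads and io.EOF on END_STREAM, its Write emits DATA frames, and
+  its Close sends an empty DATA frame with END_STREAM set:
+
+  package main
+
+  import (
+      "io"
+      "net"
+  )
+
+  // tunnel shuttles bytes between a CONNECT stream and the TCP
+  // connection the proxy opened to the origin.
+  func tunnel(stream io.ReadWriteCloser, tcp *net.TCPConn) {
+      go func() {
+          io.Copy(tcp, stream) // client DATA frames -> TCP
+          tcp.CloseWrite()     // END_STREAM -> TCP FIN
+      }()
+      io.Copy(stream, tcp) // TCP -> DATA frames
+      stream.Close()       // TCP FIN -> END_STREAM
+  }
+
+  func main() {}
+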
+ +
+ + This section outlines attributes of the HTTP protocol that improve interoperability, reduce + exposure to known security vulnerabilities, or reduce the potential for implementation + variation. + + +
+
+  HTTP/2 connections are persistent. For best performance, it is expected that clients
+  will not close connections until it is determined that no further communication with
+  a server is necessary (for example, when a user navigates away from a particular web
+  page), or until the server closes the connection.
+
+  Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port
+  pair, where host is derived from a URI, a selected alternative service, or a
+  configured proxy.
+
+  A client can create additional connections as replacements: to replace connections
+  that are near to exhausting the available stream identifier space, to refresh the
+  keying material for a TLS connection, or to replace connections that have
+  encountered errors.
+
+  A client MAY open multiple connections to the same IP address and TCP port using
+  different Server Name Indication values or to provide different TLS client
+  certificates, but SHOULD avoid creating multiple connections with the same
+  configuration.
+
+  Servers are encouraged to maintain open connections for as long as possible, but are
+  permitted to terminate idle connections if necessary. When either endpoint chooses
+  to close the transport-layer TCP connection, the terminating endpoint SHOULD first
+  send a GOAWAY () frame so that both endpoints can reliably determine whether
+  previously sent frames have been processed and gracefully complete or terminate any
+  necessary remaining tasks.
+
+
+  Connections that are made to an origin server, either directly or through a tunnel
+  created using the CONNECT method, MAY be reused for requests with multiple different
+  URI authority components. A connection can be reused as long as the origin server is
+  authoritative. For http resources, this depends on the host having resolved to the
+  same IP address.
+
+  For https resources, connection reuse additionally depends on having a certificate
+  that is valid for the host in the URI. An origin server might offer a certificate
+  with multiple subjectAltName attributes, or names with wildcards, one of which is
+  valid for the authority in the URI. For example, a certificate with a subjectAltName
+  of *.example.com might permit the use of the same connection for requests to URIs
+  starting with https://a.example.com/ and https://b.example.com/.
+
+  In some deployments, reusing a connection for multiple origins can result in
+  requests being directed to the wrong origin server. For example, TLS termination
+  might be performed by a middlebox that uses the TLS Server Name Indication (SNI)
+  extension to select an origin server. This means that it is possible for clients to
+  send confidential information to servers that might not be the intended target for
+  the request, even though the server is otherwise authoritative.
+
+  A server that does not wish clients to reuse connections can indicate that it is not
+  authoritative for a request by sending a 421 (Misdirected Request) status code in
+  response to the request (see ).
+
+  A client that is configured to use a proxy over HTTP/2 directs requests to that
+  proxy through a single connection. That is, all requests sent via a proxy reuse the
+  connection to the proxy.
+
+ +
+
+  The 421 (Misdirected Request) status code indicates that the request was directed at
+  a server that is not able to produce a response. This can be sent by a server that
+  is not configured to produce responses for the combination of scheme and authority
+  that are included in the request URI.
+
+  Clients receiving a 421 (Misdirected Request) response from a server MAY retry the
+  request, whether or not the request method is idempotent, over a different
+  connection. This is possible if a connection is reused () or if an alternative
+  service is selected ().
+
+  This status code MUST NOT be generated by proxies.
+
+  A 421 response is cacheable by default; i.e., unless otherwise indicated by the
+  method definition or explicit cache controls (see ).
+
+
+ +
+ + Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over + TLS. The general TLS usage guidance in SHOULD be followed, with + some additional restrictions that are specific to HTTP/2. + + + + An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on + feature set and cipher suite described in this section. Due to implementation + limitations, it might not be possible to fail TLS negotiation. An endpoint MUST + immediately terminate an HTTP/2 connection that does not meet these minimum requirements + with a connection error of type + INADEQUATE_SECURITY. + + +
+ + The TLS implementation MUST support the Server Name Indication + (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when + negotiating TLS. + + + The TLS implementation MUST disable compression. TLS compression can lead to the + exposure of information that would not otherwise be revealed . + Generic compression is unnecessary since HTTP/2 provides compression features that are + more aware of context and therefore likely to be more appropriate for use for + performance, security or other reasons. + + + The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS + renegotiation as a connection error of type + PROTOCOL_ERROR. Note that disabling renegotiation can result in + long-lived connections becoming unusable due to limits on the number of messages the + underlying cipher suite can encipher. + + + A client MAY use renegotiation to provide confidentiality protection for client + credentials offered in the handshake, but any renegotiation MUST occur prior to sending + the connection preface. A server SHOULD request a client certificate if it sees a + renegotiation request immediately after establishing a connection. + + + This effectively prevents the use of renegotiation in response to a request for a + specific protected resource. A future specification might provide a way to support this + use case. + +
+ +
+
+  The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
+  only be used with cipher suites that have ephemeral key exchange, such as ephemeral
+  Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange
+  MUST have a minimum size of 2048 bits for DHE or a security level of 128 bits for
+  ECDHE. Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used
+  with cipher suites that use stream or block ciphers. Authenticated Encryption with
+  Additional Data (AEAD) modes, such as the Galois/Counter Mode (GCM) for AES, are
+  acceptable.
+
+  The effect of these restrictions is that TLS 1.2 implementations could have
+  non-intersecting sets of available cipher suites, since these prevent the use of the
+  cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
+  HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with
+  P-256 .
+
+  Clients MAY advertise support of cipher suites that are prohibited by the above
+  restrictions in order to allow for connection to servers that do not support HTTP/2.
+  This enables a fallback to protocols without these constraints without the
+  additional latency imposed by using a separate connection for fallback.
+
+
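+
+  In Go's crypto/tls, the requirements from the two subsections above (SNI always
+  set, TLS 1.2 as a floor, and the mandatory AEAD/ECDHE suite available) come
+  together in a config like the following sketch; crypto/tls does not implement
+  TLS compression, so there is nothing to disable there:
+
+  package main
+
+  import "crypto/tls"
+
+  func clientTLSConfig(host string) *tls.Config {
+      return &tls.Config{
+          ServerName: host, // SNI: clients MUST name the target domain
+          MinVersion: tls.VersionTLS12,
+          CipherSuites: []uint16{
+              // The suite HTTP/2 implementations are required to support.
+              tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          },
+          NextProtos: []string{"h2"}, // ALPN identifier for HTTP/2 over TLS
+      }
+  }
+
+  func main() {
+      _ = clientTLSConfig("example.org")
+  }
+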
+
+ +
+
+ + HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is + authoritative in providing a given response, see . This relies on local name resolution for the "http" + URI scheme, and the authenticated server identity for the "https" scheme (see ). + +
+ +
+
+  In a cross-protocol attack, an attacker causes a client to initiate a transaction in
+  one protocol toward a server that understands a different protocol. An attacker
+  might be able to cause the transaction to appear as a valid transaction in the
+  second protocol. In combination with the capabilities of the web context, this can
+  be used to interact with poorly protected servers in private networks.
+
+  Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered
+  sufficient protection against cross-protocol attacks. ALPN provides a positive
+  indication that a server is willing to proceed with HTTP/2, which prevents attacks
+  on other TLS-based protocols.
+
+  The encryption in TLS makes it difficult for attackers to control the data that
+  could be used in a cross-protocol attack on a cleartext protocol.
+
+  The cleartext version of HTTP/2 has minimal protection against cross-protocol
+  attacks. The connection preface contains a string that is designed to confuse
+  HTTP/1.1 servers, but no special protection is offered for other protocols. A
+  server that is willing to ignore parts of an HTTP/1.1 request containing an Upgrade
+  header field in addition to the client connection preface could be exposed to a
+  cross-protocol attack.
+
+ +
+
+  HTTP/2 header field names and values are encoded as sequences of octets with a
+  length prefix. This enables HTTP/2 to carry any string of octets as the name or
+  value of a header field. An intermediary that translates HTTP/2 requests or
+  responses into HTTP/1.1 directly could permit the creation of corrupted HTTP/1.1
+  messages. An attacker might exploit this behavior to cause the intermediary to
+  create HTTP/1.1 messages with illegal header fields, extra header fields, or even
+  new messages that are entirely falsified.
+
+  Header field names or values that contain characters not permitted by HTTP/1.1,
+  including carriage return (ASCII 0xd) or line feed (ASCII 0xa), MUST NOT be
+  translated verbatim by an intermediary, as stipulated in .
+
+  Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity for an
+  attacker. Intermediaries that perform translation to HTTP/2 MUST remove any
+  instances of the obs-fold production from header field values.
+
+ +
+ + Pushed responses do not have an explicit request from the client; the request + is provided by the server in the PUSH_PROMISE frame. + + + Caching responses that are pushed is possible based on the guidance provided by the origin + server in the Cache-Control header field. However, this can cause issues if a single + server hosts more than one tenant. For example, a server might offer multiple users each + a small portion of its URI space. + + + Where multiple tenants share space on the same server, that server MUST ensure that + tenants are not able to push representations of resources that they do not have authority + over. Failure to enforce this would allow a tenant to provide a representation that would + be served out of cache, overriding the actual representation that the authoritative tenant + provides. + + + Pushed responses for which an origin server is not authoritative (see + ) are never cached or used. + +
+ +
+
+  An HTTP/2 connection can demand a greater commitment of resources to operate than an
+  HTTP/1.1 connection. The use of header compression and flow control depends on a
+  commitment of resources for storing a greater amount of state. Settings for these
+  features ensure that memory commitments for them are strictly bounded.
+
+  The number of PUSH_PROMISE frames is not constrained in the same fashion. A client
+  that accepts server push SHOULD limit the number of streams it allows to be in the
+  "reserved (remote)" state. An excessive number of server push streams can be treated
+  as a stream error of type ENHANCE_YOUR_CALM.
+
+  Processing capacity cannot be guarded as effectively as state capacity.
+
+  The SETTINGS frame can be abused to cause a peer to expend additional processing
+  time. This might be done by pointlessly changing SETTINGS parameters, setting
+  multiple undefined parameters, or changing the same setting multiple times in the
+  same frame. WINDOW_UPDATE or PRIORITY frames can be abused to cause an unnecessary
+  waste of resources.
+
+  Large numbers of small or empty frames can be abused to cause a peer to expend time
+  processing frame headers. Note however that some uses are entirely legitimate, such
+  as the sending of an empty DATA frame to end a stream.
+
+  Header compression also offers some opportunities to waste processing resources; see
+  for more details on potential abuses.
+
+  Limits in SETTINGS parameters cannot be reduced instantaneously, which leaves an
+  endpoint exposed to behavior from a peer that could exceed the new limits. In
+  particular, immediately after establishing a connection, limits set by a server are
+  not known to clients and could be exceeded without being an obvious protocol
+  violation.
+
+  All these features - i.e., SETTINGS changes, small frames, header compression - have
+  legitimate uses. These features become a burden only when they are used
+  unnecessarily or to excess.
+
+  An endpoint that doesn't monitor this behavior exposes itself to a risk of
+  denial-of-service attack. Implementations SHOULD track the use of these features and
+  set limits on their use. An endpoint MAY treat activity that is suspicious as a
+  connection error of type ENHANCE_YOUR_CALM.
+
+
+  A large header block can cause an implementation to commit a large amount of state.
+  Header fields that are critical for routing can appear toward the end of a header
+  block, which prevents streaming of header fields to their ultimate destination. For
+  this and other reasons, such as ensuring cache correctness, an endpoint might need
+  to buffer the entire header block. Since there is no hard limit to the size of a
+  header block, some endpoints could be forced to commit a large amount of available
+  memory for header fields.
+
+  An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE setting to advise peers of
+  limits that might apply to the size of header blocks. This setting is only advisory,
+  so endpoints MAY choose to send header blocks that exceed this limit and risk having
+  the request or response treated as malformed. This setting is specific to a
+  connection, so any request or response could encounter a hop with a lower, unknown
+  limit. An intermediary can attempt to avoid this problem by passing on values
+  presented by different peers, but they are not obligated to do so.
+
+  A server that receives a larger header block than it is willing to handle can send
+  an HTTP 431 (Request Header Fields Too Large) status code . A client can discard
+  responses that it cannot process. The header block MUST be processed to ensure a
+  consistent connection state, unless the connection is closed.
+
+
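+
+  The size that SETTINGS_MAX_HEADER_LIST_SIZE is measured against is defined
+  elsewhere in this specification as the sum of uncompressed name and value
+  lengths plus 32 octets of per-field overhead. A small Go sketch of that
+  accounting (types hypothetical):
+
+  package main
+
+  import "fmt"
+
+  type headerField struct{ name, value string }
+
+  // headerListSize returns the uncompressed size of a header list:
+  // name length + value length + 32 octets of overhead per field.
+  func headerListSize(fields []headerField) uint32 {
+      var n uint32
+      for _, f := range fields {
+          n += uint32(len(f.name)) + uint32(len(f.value)) + 32
+      }
+      return n
+  }
+
+  func main() {
+      fmt.Println(headerListSize([]headerField{
+          {":method", "GET"},     // 7 + 3 + 32 = 42
+          {":path", "/resource"}, // 5 + 9 + 32 = 46
+      })) // 88
+  }
+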
+ +
+ + HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover + secret data when it is compressed in the same context as data under attacker control. + + + There are demonstrable attacks on compression that exploit the characteristics of the web + (e.g., ). The attacker induces multiple requests containing + varying plaintext, observing the length of the resulting ciphertext in each, which + reveals a shorter length when a guess about the secret is correct. + + + Implementations communicating on a secure channel MUST NOT compress content that includes + both confidential and attacker-controlled data unless separate compression dictionaries + are used for each source of data. Compression MUST NOT be used if the source of data + cannot be reliably determined. Generic stream compression, such as that provided by TLS + MUST NOT be used with HTTP/2 (). + + + Further considerations regarding the compression of header fields are described in . + +
+ +
+
+  Padding within HTTP/2 is not intended as a replacement for general-purpose padding,
+  such as might be provided by TLS. Redundant padding could even be counterproductive.
+  Correct application of padding can depend on having specific knowledge of the data
+  that is being padded.
+
+  To mitigate attacks that rely on compression, disabling or limiting compression
+  might be preferable to padding as a countermeasure.
+
+  Padding can be used to obscure the exact size of frame content, and is provided to
+  mitigate specific attacks within HTTP, for example attacks where compressed content
+  includes both attacker-controlled plaintext and secret data (see, for example, ).
+
+  Use of padding can result in less protection than might seem immediately obvious. At
+  best, padding only makes it more difficult for an attacker to infer length
+  information by increasing the number of frames an attacker has to observe.
+  Incorrectly implemented padding schemes can be easily defeated. In particular,
+  randomized padding with a predictable distribution provides very little protection;
+  similarly, padding payloads to a fixed size exposes information as payload sizes
+  cross the fixed size boundary, which could be possible if an attacker can control
+  plaintext.
+
+  Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding for
+  HEADERS and PUSH_PROMISE frames. A valid reason for an intermediary to change the
+  amount of padding of frames is to improve the protections that padding provides.
+
+ +
+
+  Several characteristics of HTTP/2 provide an observer an opportunity to correlate
+  actions of a single client or server over time. These include the value of settings,
+  the manner in which flow control windows are managed, the way priorities are
+  allocated to streams, timing of reactions to stimulus, and handling of any optional
+  features.
+
+  Where these characteristics create observable differences in behavior, they could be
+  used as a basis for fingerprinting a specific client, as defined in .
+
+
+ +
+
+  A string for identifying HTTP/2 is entered into the "Application Layer Protocol
+  Negotiation (ALPN) Protocol IDs" registry established in .
+
+  This document establishes a registry for frame types, settings, and error codes.
+  These new registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2
+  Parameters" section.
+
+  This document registers the HTTP2-Settings header field for use in HTTP, and the 421
+  (Misdirected Request) status code.
+
+  This document registers the PRI method for use in HTTP, to avoid collisions with the
+  connection preface.
+
+ + This document creates two registrations for the identification of HTTP/2 in the + "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . + + + The "h2" string identifies HTTP/2 when used over TLS: + + HTTP/2 over TLS + 0x68 0x32 ("h2") + This document + + + + The "h2c" string identifies HTTP/2 when used over cleartext TCP: + + HTTP/2 over TCP + 0x68 0x32 0x63 ("h2c") + This document + + +
+ +
+
+  This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
+  Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates
+  under either of the "IETF Review" or "IESG Approval" policies for values between
+  0x00 and 0xef, with values between 0xf0 and 0xff being reserved for experimental
+  use.
+
+  New entries in this registry require the following information:
+
+  A name or label for the frame type.
+
+  The 8-bit code assigned to the frame type.
+
+  A reference to a specification that includes a description of the frame layout, its
+  semantics and flags that the frame type uses, including any parts of the frame that
+  are conditionally present based on the value of flags.
+
+  The entries in the following table are registered by this document.
+
+  Frame Type     Code  Section
+  DATA           0x0
+  HEADERS        0x1
+  PRIORITY       0x2
+  RST_STREAM     0x3
+  SETTINGS       0x4
+  PUSH_PROMISE   0x5
+  PING           0x6
+  GOAWAY         0x7
+  WINDOW_UPDATE  0x8
+  CONTINUATION   0x9
+
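+
+  The vendored package in this directory defines constants mirroring this table
+  (FrameData, FrameHeaders, and so on in its frame definitions). A self-contained
+  sketch of the registry's code points:
+
+  package main
+
+  // FrameType is the 8-bit frame type code from the table above.
+  type FrameType uint8
+
+  const (
+      FrameData         FrameType = 0x0
+      FrameHeaders      FrameType = 0x1
+      FramePriority     FrameType = 0x2
+      FrameRSTStream    FrameType = 0x3
+      FrameSettings     FrameType = 0x4
+      FramePushPromise  FrameType = 0x5
+      FramePing         FrameType = 0x6
+      FrameGoAway       FrameType = 0x7
+      FrameWindowUpdate FrameType = 0x8
+      FrameContinuation FrameType = 0x9
+  )
+
+  func main() {}
+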
+ +
+
+  This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings"
+  registry manages a 16-bit space. The "HTTP/2 Settings" registry operates under the
+  "Expert Review" policy for values in the range from 0x0000 to 0xefff, with values
+  between 0xf000 and 0xffff being reserved for experimental use.
+
+  New registrations are advised to provide the following information:
+
+  A symbolic name for the setting. Specifying a setting name is optional.
+
+  The 16-bit code assigned to the setting.
+
+  An initial value for the setting.
+
+  An optional reference to a specification that describes the use of the setting.
+
+  An initial set of setting registrations can be found in .
+
+  Name                    Code  Initial Value  Specification
+  HEADER_TABLE_SIZE       0x1   4096
+  ENABLE_PUSH             0x2   1
+  MAX_CONCURRENT_STREAMS  0x3   (infinite)
+  INITIAL_WINDOW_SIZE     0x4   65535
+  MAX_FRAME_SIZE          0x5   16384
+  MAX_HEADER_LIST_SIZE    0x6   (infinite)
+
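+
+  Expressed as Go constants (the vendored package spells these
+  SettingHeaderTableSize, SettingEnablePush, and so on), with the registered initial
+  values as comments:
+
+  package main
+
+  // SettingID is the 16-bit setting code from the table above.
+  type SettingID uint16
+
+  const (
+      SettingHeaderTableSize      SettingID = 0x1 // initial value 4096
+      SettingEnablePush           SettingID = 0x2 // initial value 1
+      SettingMaxConcurrentStreams SettingID = 0x3 // initially unlimited
+      SettingInitialWindowSize    SettingID = 0x4 // initial value 65535
+      SettingMaxFrameSize         SettingID = 0x5 // initial value 16384
+      SettingMaxHeaderListSize    SettingID = 0x6 // initially unlimited
+  )
+
+  func main() {}
+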
+ +
+
+  This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
+  registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
+  "Expert Review" policy.
+
+  Registrations for error codes are required to include a description of the error
+  code. An expert reviewer is advised to examine new registrations for possible
+  duplication with existing error codes. Use of existing registrations is to be
+  encouraged, but not mandated.
+
+  New registrations are advised to provide the following information:
+
+  A name for the error code. Specifying an error code name is optional.
+
+  The 32-bit error code value.
+
+  A brief description of the error code semantics, longer if no detailed specification
+  is provided.
+
+  An optional reference for a specification that defines the error code.
+
+  The entries in the following table are registered by this document.
+
+  Name                 Code  Description
+  NO_ERROR             0x0   Graceful shutdown
+  PROTOCOL_ERROR       0x1   Protocol error detected
+  INTERNAL_ERROR       0x2   Implementation fault
+  FLOW_CONTROL_ERROR   0x3   Flow control limits exceeded
+  SETTINGS_TIMEOUT     0x4   Settings not acknowledged
+  STREAM_CLOSED        0x5   Frame received for closed stream
+  FRAME_SIZE_ERROR     0x6   Frame size incorrect
+  REFUSED_STREAM       0x7   Stream not processed
+  CANCEL               0x8   Stream cancelled
+  COMPRESSION_ERROR    0x9   Compression state not updated
+  CONNECT_ERROR        0xa   TCP connection error for CONNECT method
+  ENHANCE_YOUR_CALM    0xb   Processing capacity exceeded
+  INADEQUATE_SECURITY  0xc   Negotiated TLS parameters not acceptable
+
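+
+  And the error code registry as Go constants (the vendored package uses the
+  ErrCodeProtocol-style names seen in transport.go later in this diff):
+
+  package main
+
+  // ErrCode is the 32-bit error code from the table above.
+  type ErrCode uint32
+
+  const (
+      ErrCodeNo                 ErrCode = 0x0 // graceful shutdown
+      ErrCodeProtocol           ErrCode = 0x1 // protocol error detected
+      ErrCodeInternal           ErrCode = 0x2 // implementation fault
+      ErrCodeFlowControl        ErrCode = 0x3 // flow control limits exceeded
+      ErrCodeSettingsTimeout    ErrCode = 0x4 // settings not acknowledged
+      ErrCodeStreamClosed       ErrCode = 0x5 // frame received for closed stream
+      ErrCodeFrameSize          ErrCode = 0x6 // frame size incorrect
+      ErrCodeRefusedStream      ErrCode = 0x7 // stream not processed
+      ErrCodeCancel             ErrCode = 0x8 // stream cancelled
+      ErrCodeCompression        ErrCode = 0x9 // compression state not updated
+      ErrCodeConnect            ErrCode = 0xa // TCP error for CONNECT method
+      ErrCodeEnhanceYourCalm    ErrCode = 0xb // processing capacity exceeded
+      ErrCodeInadequateSecurity ErrCode = 0xc // inadequate TLS parameters
+  )
+
+  func main() {}
+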
+ +
+ + This section registers the HTTP2-Settings header field in the + Permanent Message Header Field Registry. + + + HTTP2-Settings + + + http + + + standard + + + IETF + + + of this document + + + This header field is only used by an HTTP/2 client for Upgrade-based negotiation. + + + +
+ +
+ + This section registers the PRI method in the HTTP Method + Registry (). + + + PRI + + + No + + + No + + + of this document + + + This method is never used by an actual client. This method will appear to be used + when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection + preface. + + + +
+ +
+ + This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext + Transfer Protocol (HTTP) Status Code Registry (). + + + + + 421 + + + Misdirected Request + + + of this document + + + +
+ +
+ +
+ + This document includes substantial input from the following individuals: + + + Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin + Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin + Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY + contributors). + + + Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). + + + William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto + Peon, Rob Trace (Flow control). + + + Mike Bishop (Extensibility). + + + Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan + (Substantial editorial contributions). + + + Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. + + + Alexey Melnikov was an editor of this document during 2013. + + + A substantial proportion of Martin's contribution was supported by Microsoft during his + employment there. + + + +
+
+ + + + + + HPACK - Header Compression for HTTP/2 + + + + + + + + + + + + Transmission Control Protocol + + + University of Southern California (USC)/Information Sciences + Institute + + + + + + + + + + + Key words for use in RFCs to Indicate Requirement Levels + + + Harvard University +
sob@harvard.edu
+
+ +
+ + +
+ + + + + HTTP Over TLS + + + + + + + + + + Uniform Resource Identifier (URI): Generic + Syntax + + + + + + + + + + + + The Base16, Base32, and Base64 Data Encodings + + + + + + + + + Guidelines for Writing an IANA Considerations Section in RFCs + + + + + + + + + + + Augmented BNF for Syntax Specifications: ABNF + + + + + + + + + + + The Transport Layer Security (TLS) Protocol Version 1.2 + + + + + + + + + + + Transport Layer Security (TLS) Extensions: Extension Definitions + + + + + + + + + + Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension + + + + + + + + + + + + + TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois + Counter Mode (GCM) + + + + + + + + + + + Digital Signature Standard (DSS) + + NIST + + + + + + + + + Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing + + Adobe Systems Incorporated +
fielding@gbiv.com
+
+ + greenbytes GmbH +
julian.reschke@greenbytes.de
+
+ +
+ + +
+ + + + Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content + + Adobe Systems Incorporated +
fielding@gbiv.com
+
+ + greenbytes GmbH +
julian.reschke@greenbytes.de
+
+ +
+ + +
+ + + Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests + + Adobe Systems Incorporated +
fielding@gbiv.com
+
+ + greenbytes GmbH +
julian.reschke@greenbytes.de
+
+ +
+ +
+ + + Hypertext Transfer Protocol (HTTP/1.1): Range Requests + + Adobe Systems Incorporated +
fielding@gbiv.com
+
+ + World Wide Web Consortium +
ylafon@w3.org
+
+ + greenbytes GmbH +
julian.reschke@greenbytes.de
+
+ +
+ +
+ + + Hypertext Transfer Protocol (HTTP/1.1): Caching + + Adobe Systems Incorporated +
fielding@gbiv.com
+
+ + Akamai +
mnot@mnot.net
+
+ + greenbytes GmbH +
julian.reschke@greenbytes.de
+
+ +
+ + +
+ + + Hypertext Transfer Protocol (HTTP/1.1): Authentication + + Adobe Systems Incorporated +
fielding@gbiv.com
+
+ + greenbytes GmbH +
julian.reschke@greenbytes.de
+
+ +
+ + +
+ + + + HTTP State Management Mechanism + + + + + +
+ + + + + + TCP Extensions for High Performance + + + + + + + + + + + + Transport Layer Security Protocol Compression Methods + + + + + + + + + Additional HTTP Status Codes + + + + + + + + + + + Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) + + + + + + + + + + + + + + + AES Galois Counter Mode (GCM) Cipher Suites for TLS + + + + + + + + + + + + HTML5 + + + + + + + + + + + Latest version available at + . + + + + + + + Talking to Yourself for Fun and Profit + + + + + + + + + + + + + + BREACH: Reviving the CRIME Attack + + + + + + + + + + + Registration Procedures for Message Header Fields + + Nine by Nine +
GK-IETF@ninebynine.org
+
+ + BEA Systems +
mnot@pobox.com
+
+ + HP Labs +
JeffMogul@acm.org
+
+ +
+ + +
+ + + + Recommendations for Secure Use of TLS and DTLS + + + + + + + + + + + + + + + + + + HTTP Alternative Services + + + Akamai + + + Mozilla + + + greenbytes + + + + + + +
+ +
+ + This section is to be removed by RFC Editor before publication. + + +
+ + Renamed Not Authoritative status code to Misdirected Request. + +
+ +
+
+  Pseudo-header fields are now required to appear strictly before regular ones.
+
+  Restored 1xx series status codes, except 101.
+
+  Changed the frame length field to 24 bits. Expanded frame header to 9 octets. Added
+  a setting to limit the damage.
+
+  Added a setting to advise peers of header set size limits.
+
+  Removed segments.
+
+  Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping.
+
+ +
+ + Restored extensibility options. + + + Restricting TLS cipher suites to AEAD only. + + + Removing Content-Encoding requirements. + + + Permitting the use of PRIORITY after stream close. + + + Removed ALTSVC frame. + + + Removed BLOCKED frame. + + + Reducing the maximum padding size to 256 octets; removing padding from + CONTINUATION frames. + + + Removed per-frame GZIP compression. + +
+ +
+ + Added BLOCKED frame (at risk). + + + Simplified priority scheme. + + + Added DATA per-frame GZIP compression. + +
+ +
+ + Changed "connection header" to "connection preface" to avoid confusion. + + + Added dependency-based stream prioritization. + + + Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. + + + Adding missing padding to PUSH_PROMISE. + + + Integrate ALTSVC frame and supporting text. + + + Dropping requirement on "deflate" Content-Encoding. + + + Improving security considerations around use of compression. + +
+ +
+ + Adding padding for data frames. + + + Renumbering frame types, error codes, and settings. + + + Adding INADEQUATE_SECURITY error code. + + + Updating TLS usage requirements to 1.2; forbidding TLS compression. + + + Removing extensibility for frames and settings. + + + Changing setting identifier size. + + + Removing the ability to disable flow control. + + + Changing the protocol identification token to "h2". + + + Changing the use of :authority to make it optional and to allow userinfo in non-HTTP + cases. + + + Allowing split on 0x0 for Cookie. + + + Reserved PRI method in HTTP/1.1 to avoid possible future collisions. + +
+ +
+ + Added cookie crumbling for more efficient header compression. + + + Added header field ordering with the value-concatenation mechanism. + +
+ +
+ + Marked draft for implementation. + +
+ +
+ + Adding definition for CONNECT method. + + + Constraining the use of push to safe, cacheable methods with no request body. + + + Changing from :host to :authority to remove any potential confusion. + + + Adding setting for header compression table size. + + + Adding settings acknowledgement. + + + Removing unnecessary and potentially problematic flags from CONTINUATION. + + + Added denial of service considerations. + +
+
+ + Marking the draft ready for implementation. + + + Renumbering END_PUSH_PROMISE flag. + + + Editorial clarifications and changes. + +
+ +
+ + Added CONTINUATION frame for HEADERS and PUSH_PROMISE. + + + PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is + zero. + + + Push expanded to allow all safe methods without a request body. + + + Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 + hop-by-hop header fields. + + + Requiring that intermediaries not forward requests with missing or illegal routing + :-headers. + + + Clarified requirements around handling different frames after stream close, stream reset + and GOAWAY. + + + Added more specific prohibitions for sending of different frame types in various stream + states. + + + Making the last received setting value the effective value. + + + Clarified requirements on TLS version, extension and ciphers. + +
+ +
+ + Committed major restructuring atrocities. + + + Added reference to first header compression draft. + + + Added more formal description of frame lifecycle. + + + Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. + + + Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. + + + Added PRIORITY frame. + +
+ +
+ + Added continuations to frames carrying header blocks. + + + Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful + concepts, like cookies. + + + Removed "message". + + + Switched to TLS ALPN from NPN. + + + Editorial changes. + +
+ +
+ + Added IANA considerations section for frame types, error codes and settings. + + + Removed data frame compression. + + + Added PUSH_PROMISE. + + + Added globally applicable flags to framing. + + + Removed zlib-based header compression mechanism. + + + Updated references. + + + Clarified stream identifier reuse. + + + Removed CREDENTIALS frame and associated mechanisms. + + + Added advice against naive implementation of flow control. + + + Added session header section. + + + Restructured frame header. Removed distinction between data and control frames. + + + Altered flow control properties to include session-level limits. + + + Added note on cacheability of pushed resources and multiple tenant servers. + + + Changed protocol label form based on discussions. + +
+ +
+ + Changed title throughout. + + + Removed section on Incompatibilities with SPDY draft#2. + + + Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . + + + Replaced abstract and introduction. + + + Added section on starting HTTP/2.0, including upgrade mechanism. + + + Removed unused references. + + + Added flow control principles based on . + +
+ +
+ + Adopted as base for draft-ietf-httpbis-http2. + + + Updated authors/editors list. + + + Added status note. + +
+
+ +
+
+ diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/transport.go b/Godeps/_workspace/src/github.com/bradfitz/http2/transport.go new file mode 100644 index 000000000..ea62188db --- /dev/null +++ b/Godeps/_workspace/src/github.com/bradfitz/http2/transport.go @@ -0,0 +1,553 @@ +// Copyright 2015 The Go Authors. +// See https://go.googlesource.com/go/+/master/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://go.googlesource.com/go/+/master/LICENSE + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/bradfitz/http2/hpack" +) + +type Transport struct { + Fallback http.RoundTripper + + // TODO: remove this and make more general with a TLS dial hook, like http + InsecureTLSDial bool + + connMu sync.Mutex + conns map[string][]*clientConn // key is host:port +} + +type clientConn struct { + t *Transport + tconn *tls.Conn + tlsState *tls.ConnectionState + connKey []string // key(s) this connection is cached in, in t.conns + + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + hdec *hpack.Decoder + nextRes *http.Response + + mu sync.Mutex + closed bool + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + streams map[uint32]*clientStream + nextStreamID uint32 + bw *bufio.Writer + werr error // first write error that has occurred + br *bufio.Reader + fr *Framer + // Settings from peer: + maxFrameSize uint32 + maxConcurrentStreams uint32 + initialWindowSize uint32 + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder +} + +type clientStream struct { + ID uint32 + resc chan resAndError + pw *io.PipeWriter + pr *io.PipeReader +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL.Scheme != "https" { + if t.Fallback == nil { + return nil, errors.New("http2: unsupported scheme and no Fallback") + } + return t.Fallback.RoundTrip(req) + } + + host, port, err := net.SplitHostPort(req.URL.Host) + if err != nil { + host = req.URL.Host + port = "443" + } + + for { + cc, err := t.getClientConn(host, port) + if err != nil { + return nil, err + } + res, err := cc.roundTrip(req) + if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)? + continue + } + if err != nil { + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. 
+func (t *Transport) CloseIdleConnections() { + t.connMu.Lock() + defer t.connMu.Unlock() + for _, vv := range t.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +var errClientConnClosed = errors.New("http2: client conn is closed") + +func shouldRetryRequest(err error) bool { + // TODO: or GOAWAY graceful shutdown stuff + return err == errClientConnClosed +} + +func (t *Transport) removeClientConn(cc *clientConn) { + t.connMu.Lock() + defer t.connMu.Unlock() + for _, key := range cc.connKey { + vv, ok := t.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + t.conns[key] = newList + } else { + delete(t.conns, key) + } + } +} + +func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + return out +} + +func (t *Transport) getClientConn(host, port string) (*clientConn, error) { + t.connMu.Lock() + defer t.connMu.Unlock() + + key := net.JoinHostPort(host, port) + + for _, cc := range t.conns[key] { + if cc.canTakeNewRequest() { + return cc, nil + } + } + if t.conns == nil { + t.conns = make(map[string][]*clientConn) + } + cc, err := t.newClientConn(host, port, key) + if err != nil { + return nil, err + } + t.conns[key] = append(t.conns[key], cc) + return cc, nil +} + +func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) { + cfg := &tls.Config{ + ServerName: host, + NextProtos: []string{NextProtoTLS}, + InsecureSkipVerify: t.InsecureTLSDial, + } + tconn, err := tls.Dial("tcp", host+":"+port, cfg) + if err != nil { + return nil, err + } + if err := tconn.Handshake(); err != nil { + return nil, err + } + if !t.InsecureTLSDial { + if err := tconn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := tconn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + // TODO(bradfitz): fall back to Fallback + return nil, fmt.Errorf("bad protocol: %v", p) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("could not negotiate protocol mutually") + } + if _, err := tconn.Write(clientPreface); err != nil { + return nil, err + } + + cc := &clientConn{ + t: t, + tconn: tconn, + connKey: []string{key}, // TODO: cert's validated hostnames too + tlsState: &state, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + streams: make(map[uint32]*clientStream), + } + cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr}) + cc.br = bufio.NewReader(tconn) + cc.fr = NewFramer(cc.bw, cc.br) + cc.henc = hpack.NewEncoder(&cc.hbuf) + + cc.fr.WriteSettings() + // TODO: re-send more conn-level flow control tokens when server uses all these. + cc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs? 
+ cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + // Read the obligatory SETTINGS frame + f, err := cc.fr.ReadFrame() + if err != nil { + return nil, err + } + sf, ok := f.(*SettingsFrame) + if !ok { + return nil, fmt.Errorf("expected settings frame, got: %T", f) + } + cc.fr.WriteSettingsAck() + cc.bw.Flush() + + sf.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more + log.Printf("Unhandled Setting: %v", s) + } + return nil + }) + // TODO: figure out henc size + cc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField) + + go cc.readLoop() + return cc, nil +} + +func (cc *clientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.goAway = f +} + +func (cc *clientConn) canTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.goAway == nil && + int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && + cc.nextStreamID < 2147483647 +} + +func (cc *clientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + cc.tconn.Close() +} + +func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) { + cc.mu.Lock() + + if cc.closed { + cc.mu.Unlock() + return nil, errClientConnClosed + } + + cs := cc.newStream() + hasBody := false // TODO + + // we send: HEADERS[+CONTINUATION] + (DATA?) + hdrs := cc.encodeHeaders(req) + first := true + for len(hdrs) > 0 { + chunk := hdrs + if len(chunk) > int(cc.maxFrameSize) { + chunk = chunk[:cc.maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: cs.ID, + BlockFragment: chunk, + EndStream: !hasBody, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(cs.ID, endHeaders, chunk) + } + } + cc.bw.Flush() + werr := cc.werr + cc.mu.Unlock() + + if hasBody { + // TODO: write data. and it should probably be interleaved: + // go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... etc + } + + if werr != nil { + return nil, werr + } + + re := <-cs.resc + if re.err != nil { + return nil, re.err + } + res := re.res + res.Request = req + res.TLS = cc.tlsState + return res, nil +} + +// requires cc.mu be held. +func (cc *clientConn) encodeHeaders(req *http.Request) []byte { + cc.hbuf.Reset() + + // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go + host := req.Host + if host == "" { + host = req.URL.Host + } + + path := req.URL.Path + if path == "" { + path = "/" + } + + cc.writeHeader(":authority", host) // probably not right for all sites + cc.writeHeader(":method", req.Method) + cc.writeHeader(":path", path) + cc.writeHeader(":scheme", "https") + + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + if lowKey == "host" { + continue + } + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes() +} + +func (cc *clientConn) writeHeader(name, value string) { + log.Printf("sending %q = %q", name, value) + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. 
+func (cc *clientConn) newStream() *clientStream { + cs := &clientStream{ + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + } + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove { + delete(cc.streams, id) + } + return cs +} + +// runs in its own goroutine. +func (cc *clientConn) readLoop() { + defer cc.t.removeClientConn(cc) + defer close(cc.readerDone) + + activeRes := map[uint32]*clientStream{} // keyed by streamID + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. + defer func() { + err := cc.readerErr + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range activeRes { + cs.pw.CloseWithError(err) + } + }() + + // continueStreamID is the stream ID we're waiting for + // continuation frames for. + var continueStreamID uint32 + + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.readerErr = err + return + } + log.Printf("Transport received %v: %#v", f.Header(), f) + + streamID := f.Header().StreamID + + _, isContinue := f.(*ContinuationFrame) + if isContinue { + if streamID != continueStreamID { + log.Printf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, continueStreamID) + cc.readerErr = ConnectionError(ErrCodeProtocol) + return + } + } else if continueStreamID != 0 { + // Continue frames need to be adjacent in the stream + // and we were in the middle of headers. + log.Printf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, continueStreamID) + cc.readerErr = ConnectionError(ErrCodeProtocol) + return + } + + if streamID%2 == 0 { + // Ignore streams pushed from the server for now. + // These always have an even stream id. + continue + } + streamEnded := false + if ff, ok := f.(streamEnder); ok { + streamEnded = ff.StreamEnded() + } + + cs := cc.streamByID(streamID, streamEnded) + if cs == nil { + log.Printf("Received frame for untracked stream ID %d", streamID) + continue + } + + switch f := f.(type) { + case *HeadersFrame: + cc.nextRes = &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: make(http.Header), + } + cs.pr, cs.pw = io.Pipe() + cc.hdec.Write(f.HeaderBlockFragment()) + case *ContinuationFrame: + cc.hdec.Write(f.HeaderBlockFragment()) + case *DataFrame: + log.Printf("DATA: %q", f.Data()) + cs.pw.Write(f.Data()) + case *GoAwayFrame: + cc.t.removeClientConn(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. 
particularly the error code
+				log.Printf("transport got GOAWAY with error code = %v", f.ErrCode)
+			}
+			cc.setGoAway(f)
+		default:
+			log.Printf("Transport: unhandled response frame type %T", f)
+		}
+		headersEnded := false
+		if he, ok := f.(headersEnder); ok {
+			headersEnded = he.HeadersEnded()
+			if headersEnded {
+				continueStreamID = 0
+			} else {
+				continueStreamID = streamID
+			}
+		}
+
+		if streamEnded {
+			cs.pw.Close()
+			delete(activeRes, streamID)
+		}
+		if headersEnded {
+			if cs == nil {
+				panic("couldn't find stream") // TODO be graceful
+			}
+			// TODO: set the Body to one which notes the
+			// Close and also sends the server a
+			// RST_STREAM
+			cc.nextRes.Body = cs.pr
+			res := cc.nextRes
+			activeRes[streamID] = cs
+			cs.resc <- resAndError{res: res}
+		}
+	}
+}
+
+func (cc *clientConn) onNewHeaderField(f hpack.HeaderField) {
+	// TODO: verify pseudo headers come before non-pseudo headers
+	// TODO: verify the status is set
+	log.Printf("Header field: %+v", f)
+	if f.Name == ":status" {
+		code, err := strconv.Atoi(f.Value)
+		if err != nil {
+			panic("TODO: be graceful")
+		}
+		cc.nextRes.Status = f.Value + " " + http.StatusText(code)
+		cc.nextRes.StatusCode = code
+		return
+	}
+	if strings.HasPrefix(f.Name, ":") {
+		// "Endpoints MUST NOT generate pseudo-header fields other than those defined in this document."
+		// TODO: treat as invalid?
+		return
+	}
+	cc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value)
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go
new file mode 100644
index 000000000..d752da198
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The Go Authors.
+// See https://go.googlesource.com/go/+/master/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://go.googlesource.com/go/+/master/LICENSE
+
+package http2
+
+import (
+	"flag"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+)
+
+var (
+	extNet        = flag.Bool("extnet", false, "do external network tests")
+	transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport")
+	insecure      = flag.Bool("insecure", false, "insecure TLS dials")
+)
+
+func TestTransportExternal(t *testing.T) {
+	if !*extNet {
+		t.Skip("skipping external network test")
+	}
+	req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil)
+	rt := &Transport{
+		InsecureTLSDial: *insecure,
+	}
+	res, err := rt.RoundTrip(req)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	res.Write(os.Stdout)
+}
+
+func TestTransport(t *testing.T) {
+	const body = "sup"
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, body)
+	})
+	defer st.Close()
+
+	tr := &Transport{InsecureTLSDial: true}
+	defer tr.CloseIdleConnections()
+
+	req, err := http.NewRequest("GET", st.ts.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err := tr.RoundTrip(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer res.Body.Close()
+
+	t.Logf("Got res: %+v", res)
+	if g, w := res.StatusCode, 200; g != w {
+		t.Errorf("StatusCode = %v; want %v", g, w)
+	}
+	if g, w := res.Status, "200 OK"; g != w {
+		t.Errorf("Status = %q; want %q", g, w)
+	}
+	wantHeader := http.Header{
+		"Content-Length": []string{"3"},
+		"Content-Type":   []string{"text/plain; charset=utf-8"},
+	}
+	if !reflect.DeepEqual(res.Header, wantHeader) {
+		t.Errorf("res Header = %v; want %v", res.Header, wantHeader)
+	}
+	if
res.Request != req {
+		t.Errorf("Response.Request = %p; want %p", res.Request, req)
+	}
+	if res.TLS == nil {
+		t.Error("Response.TLS = nil; want non-nil")
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		t.Errorf("Body read: %v", err)
+	} else if string(slurp) != body {
+		t.Errorf("Body = %q; want %q", slurp, body)
+	}
+
+}
+
+func TestTransportReusesConns(t *testing.T) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, r.RemoteAddr)
+	}, optOnlyServer)
+	defer st.Close()
+	tr := &Transport{InsecureTLSDial: true}
+	defer tr.CloseIdleConnections()
+	get := func() string {
+		req, err := http.NewRequest("GET", st.ts.URL, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res, err := tr.RoundTrip(req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()
+		slurp, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			t.Fatalf("Body read: %v", err)
+		}
+		addr := strings.TrimSpace(string(slurp))
+		if addr == "" {
+			t.Fatalf("didn't get an addr in response")
+		}
+		return addr
+	}
+	first := get()
+	second := get()
+	if first != second {
+		t.Errorf("first and second responses were on different connections: %q vs %q", first, second)
+	}
+}
+
+func TestTransportAbortClosesPipes(t *testing.T) {
+	shutdown := make(chan struct{})
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			w.(http.Flusher).Flush()
+			<-shutdown
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+	defer close(shutdown) // we must shut down before st.Close() to avoid hanging
+
+	done := make(chan struct{})
+	requestMade := make(chan struct{})
+	go func() {
+		defer close(done)
+		tr := &Transport{
+			InsecureTLSDial: true,
+		}
+		req, err := http.NewRequest("GET", st.ts.URL, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res, err := tr.RoundTrip(req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()
+		close(requestMade)
+		_, err = ioutil.ReadAll(res.Body)
+		if err == nil {
+			t.Error("expected error from res.Body.Read")
+		}
+	}()
+
+	<-requestMade
+	// Now force the serve loop to end, via closing the connection.
+	st.closeConn()
+	// deadlock? that's a bug.
+	select {
+	case <-done:
+	case <-time.After(3 * time.Second):
+		t.Fatal("timeout")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/write.go b/Godeps/_workspace/src/github.com/bradfitz/http2/write.go
new file mode 100644
index 000000000..7b9bdd3a6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/write.go
@@ -0,0 +1,204 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/bradfitz/http2/hpack"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+	writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+// TODO: use it from the client code too, once it exists.
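+// Keeping the frame writers behind this small interface means each
+// writeFramer below (writeSettings, writeData, writeGoAway, ...) stays
+// independent of the connection implementation that schedules it.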
+type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// endsStream reports whether the given frame writer w will locally +// close the stream. +func endsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +type writeSettings []Setting + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + if p.code != 0 { + ctx.Flush() // ignore error: we're hanging up on them anyway + time.Sleep(50 * time.Millisecond) + ctx.CloseConn() + } + return err +} + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int + h http.Header // may be nil + endStream bool + + contentType string + contentLength string +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)}) + for k, vv := range w.h { + k = lowerHeader(k) + for _, v := range vv { + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if k == "transfer-encoding" && v != "trailers" { + continue + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + } + } + if w.contentType != "" { + enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType}) + } + if w.contentLength != "" { + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength}) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
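+	// For example, a 70 KB header block becomes one 16 KB HEADERS frame
+	// followed by four CONTINUATION frames, the last one partially full.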
+	const maxFrameSize = 16384
+
+	first := true
+	for len(headerBlock) > 0 {
+		frag := headerBlock
+		if len(frag) > maxFrameSize {
+			frag = frag[:maxFrameSize]
+		}
+		headerBlock = headerBlock[len(frag):]
+		endHeaders := len(headerBlock) == 0
+		var err error
+		if first {
+			first = false
+			err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+				StreamID:      w.streamID,
+				BlockFragment: frag,
+				EndStream:     w.endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type write100ContinueHeadersFrame struct {
+	streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
+	return ctx.Framer().WriteHeaders(HeadersFrameParam{
+		StreamID:      w.streamID,
+		BlockFragment: buf.Bytes(),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+}
+
+type writeWindowUpdate struct {
+	streamID uint32 // or 0 for conn-level
+	n        uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/writesched.go b/Godeps/_workspace/src/github.com/bradfitz/http2/writesched.go
new file mode 100644
index 000000000..0e1b7486f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/writesched.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+	// write is the interface value that does the writing, once the
+	// writeScheduler (below) has decided to select this frame
+	// to write. The write functions are all defined in write.go.
+	write writeFramer
+
+	stream *stream // used for prioritization. nil for non-stream frames.
+
+	// done, if non-nil, must be a buffered channel with space for
+	// 1 message and is sent the return value from write (or an
+	// earlier error) when the frame has been written.
+	done chan error
+}
+
+// for debugging only:
+func (wm frameWriteMsg) String() string {
+	var streamID uint32
+	if wm.stream != nil {
+		streamID = wm.stream.id
+	}
+	var des string
+	if s, ok := wm.write.(fmt.Stringer); ok {
+		des = s.String()
+	} else {
+		des = fmt.Sprintf("%T", wm.write)
+	}
+	return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+	// zero are frames not associated with a specific stream.
+	// They're sent before any stream-specific frames.
+	zero writeQueue
+
+	// maxFrameSize is the maximum size of a DATA frame
+	// we'll write. Must be non-zero and between 16K-16M.
+	maxFrameSize uint32
+
+	// sq contains the stream-specific queues, keyed by stream ID.
+	// When a stream is idle, it's deleted from the map.
+	sq map[uint32]*writeQueue
+
+	// canSend is a slice of memory that's reused between frame
+	// scheduling decisions to hold the list of writeQueues (from sq)
+	// which have enough flow control data to send. After canSend is
+	// built, the best is selected.
+	canSend []*writeQueue
+
+	// pool of empty queues for reuse.
+	queuePool []*writeQueue
+}
+
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+	if len(q.s) != 0 {
+		panic("queue must be empty")
+	}
+	ws.queuePool = append(ws.queuePool, q)
+}
+
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+	ln := len(ws.queuePool)
+	if ln == 0 {
+		return new(writeQueue)
+	}
+	q := ws.queuePool[ln-1]
+	ws.queuePool = ws.queuePool[:ln-1]
+	return q
+}
+
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+	st := wm.stream
+	if st == nil {
+		ws.zero.push(wm)
+	} else {
+		ws.streamQueue(st.id).push(wm)
+	}
+}
+
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+	if q, ok := ws.sq[streamID]; ok {
+		return q
+	}
+	if ws.sq == nil {
+		ws.sq = make(map[uint32]*writeQueue)
+	}
+	q := ws.getEmptyQueue()
+	ws.sq[streamID] = q
+	return q
+}
+
+// take returns the most important frame to write and removes it from the scheduler.
+// It is illegal to call this if the scheduler is empty or if there are no connection-level
+// flow control bytes available.
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+	if ws.maxFrameSize == 0 {
+		panic("internal error: ws.maxFrameSize not initialized or invalid")
+	}
+
+	// If there are any frames not associated with streams, prefer those first.
+	// These are usually SETTINGS, etc.
+	if !ws.zero.empty() {
+		return ws.zero.shift(), true
+	}
+	if len(ws.sq) == 0 {
+		return
+	}
+
+	// Next, prioritize frames on streams that aren't DATA frames (no cost).
+	for id, q := range ws.sq {
+		if q.firstIsNoCost() {
+			return ws.takeFrom(id, q)
+		}
+	}
+
+	// Now, all that remains are DATA frames with non-zero bytes to
+	// send. So pick the best one.
+	if len(ws.canSend) != 0 {
+		panic("should be empty")
+	}
+	for _, q := range ws.sq {
+		if n := ws.streamWritableBytes(q); n > 0 {
+			ws.canSend = append(ws.canSend, q)
+		}
+	}
+	if len(ws.canSend) == 0 {
+		return
+	}
+	defer ws.zeroCanSend()
+
+	// TODO: find the best queue
+	q := ws.canSend[0]
+
+	return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take.
+func (ws *writeScheduler) zeroCanSend() {
+	for i := range ws.canSend {
+		ws.canSend[i] = nil
+	}
+	ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+	wm := q.head()
+	ret := wm.stream.flow.available() // max we can write
+	if ret == 0 {
+		return 0
+	}
+	if int32(ws.maxFrameSize) < ret {
+		ret = int32(ws.maxFrameSize)
+	}
+	if ret == 0 {
+		panic("internal error: ws.maxFrameSize not initialized or invalid")
+	}
+	wd := wm.write.(*writeData)
+	if len(wd.p) < int(ret) {
+		ret = int32(len(wd.p))
+	}
+	return ret
+}
+
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+	wm = q.head()
+	// If the first item in this queue costs flow control tokens
+	// and we don't have enough, write as much as we can.
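+	// For example, with 8 KB of stream quota available and a 100 KB DATA
+	// frame at the head, an 8 KB chunk is written now and the remaining
+	// 92 KB stays at the head of the queue for a later scheduling pass.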
+	if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+		allowed := wm.stream.flow.available() // max we can write
+		if allowed == 0 {
+			// No quota available. Caller can try the next stream.
+			return frameWriteMsg{}, false
+		}
+		if int32(ws.maxFrameSize) < allowed {
+			allowed = int32(ws.maxFrameSize)
+		}
+		// TODO: further restrict the allowed size, because even if
+		// the peer says it's okay to write 16MB data frames, we might
+		// want to write smaller ones to properly weight competing
+		// streams' priorities.
+
+		if len(wd.p) > int(allowed) {
+			wm.stream.flow.take(allowed)
+			chunk := wd.p[:allowed]
+			wd.p = wd.p[allowed:]
+			// Make up a new write message of a valid size, rather
+			// than shifting one off the queue.
+			return frameWriteMsg{
+				stream: wm.stream,
+				write: &writeData{
+					streamID: wd.streamID,
+					p:        chunk,
+					// even if the original had endStream set, there
+					// are bytes remaining because len(wd.p) > allowed,
+					// so we know endStream is false:
+					endStream: false,
+				},
+				// our caller is blocking on the final DATA frame, not
+				// these intermediates, so no need to wait:
+				done: nil,
+			}, true
+		}
+		wm.stream.flow.take(int32(len(wd.p)))
+	}
+
+	q.shift()
+	if q.empty() {
+		ws.putEmptyQueue(q)
+		delete(ws.sq, id)
+	}
+	return wm, true
+}
+
+func (ws *writeScheduler) forgetStream(id uint32) {
+	q, ok := ws.sq[id]
+	if !ok {
+		return
+	}
+	delete(ws.sq, id)
+
+	// But keep it for others later.
+	for i := range q.s {
+		q.s[i] = frameWriteMsg{}
+	}
+	q.s = q.s[:0]
+	ws.putEmptyQueue(q)
+}
+
+type writeQueue struct {
+	s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+	q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+func (q *writeQueue) head() frameWriteMsg {
+	if len(q.s) == 0 {
+		panic("invalid use of queue")
+	}
+	return q.s[0]
+}
+
+func (q *writeQueue) shift() frameWriteMsg {
+	if len(q.s) == 0 {
+		panic("invalid use of queue")
+	}
+	wm := q.s[0]
+	// TODO: less copy-happy queue.
+	copy(q.s, q.s[1:])
+	q.s[len(q.s)-1] = frameWriteMsg{}
+	q.s = q.s[:len(q.s)-1]
+	return wm
+}
+
+func (q *writeQueue) firstIsNoCost() bool {
+	if df, ok := q.s[0].write.(*writeData); ok {
+		return len(df.p) == 0
+	}
+	return true
+}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go
new file mode 100644
index 000000000..07b53e3ae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go
@@ -0,0 +1,357 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"encoding/xml"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+)
+
+var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
+
+// The global map of sentence coverage for the http2 spec.
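+// It is populated lazily, on the first covers call, by parsing the spec
+// XML in testdata/draft-ietf-httpbis-http2.xml.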
+var defaultSpecCoverage specCoverage
+
+var loadSpecOnce sync.Once
+
+func loadSpec() {
+	if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
+		panic(err)
+	} else {
+		defaultSpecCoverage = readSpecCov(f)
+		f.Close()
+	}
+}
+
+// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
+// "covered" will be included in the report output by TestSpecCoverage.
+func covers(sec, sentences string) {
+	loadSpecOnce.Do(loadSpec)
+	defaultSpecCoverage.cover(sec, sentences)
+}
+
+type specPart struct {
+	section  string
+	sentence string
+}
+
+func (ss specPart) Less(oo specPart) bool {
+	atoi := func(s string) int {
+		n, err := strconv.Atoi(s)
+		if err != nil {
+			panic(err)
+		}
+		return n
+	}
+	a := strings.Split(ss.section, ".")
+	b := strings.Split(oo.section, ".")
+	for len(a) > 0 {
+		if len(b) == 0 {
+			return false
+		}
+		x, y := atoi(a[0]), atoi(b[0])
+		if x == y {
+			a, b = a[1:], b[1:]
+			continue
+		}
+		return x < y
+	}
+	if len(b) > 0 {
+		return true
+	}
+	return false
+}
+
+type bySpecSection []specPart
+
+func (a bySpecSection) Len() int           { return len(a) }
+func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
+func (a bySpecSection) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type specCoverage struct {
+	coverage map[specPart]bool
+	d        *xml.Decoder
+}
+
+func joinSection(sec []int) string {
+	s := fmt.Sprintf("%d", sec[0])
+	for _, n := range sec[1:] {
+		s = fmt.Sprintf("%s.%d", s, n)
+	}
+	return s
+}
+
+func (sc specCoverage) readSection(sec []int) {
+	var (
+		buf = new(bytes.Buffer)
+		sub = 0
+	)
+	for {
+		tk, err := sc.d.Token()
+		if err != nil {
+			if err == io.EOF {
+				return
+			}
+			panic(err)
+		}
+		switch v := tk.(type) {
+		case xml.StartElement:
+			if skipElement(v) {
+				if err := sc.d.Skip(); err != nil {
+					panic(err)
+				}
+				if v.Name.Local == "section" {
+					sub++
+				}
+				break
+			}
+			switch v.Name.Local {
+			case "section":
+				sub++
+				sc.readSection(append(sec, sub))
+			case "xref":
+				buf.Write(sc.readXRef(v))
+			}
+		case xml.CharData:
+			if len(sec) == 0 {
+				break
+			}
+			buf.Write(v)
+		case xml.EndElement:
+			if v.Name.Local == "section" {
+				sc.addSentences(joinSection(sec), buf.String())
+				return
+			}
+		}
+	}
+}
+
+func (sc specCoverage) readXRef(se xml.StartElement) []byte {
+	var b []byte
+	for {
+		tk, err := sc.d.Token()
+		if err != nil {
+			panic(err)
+		}
+		switch v := tk.(type) {
+		case xml.CharData:
+			if b != nil {
+				panic("unexpected CharData")
+			}
+			b = []byte(string(v))
+		case xml.EndElement:
+			if v.Name.Local != "xref" {
+				panic("expected </xref>")
+			}
+			if b != nil {
+				return b
+			}
+			sig := attrSig(se)
+			switch sig {
+			case "target":
+				return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
+			case "fmt-of,rel,target", "fmt-,,rel,target":
+				return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
+			case "fmt-of,sec,target", "fmt-,,sec,target":
+				return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
+			case "fmt-of,rel,sec,target":
+				return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
+			default:
+				panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
+			}
+		default:
+			panic(fmt.Sprintf("unexpected tag %q", v))
+		}
+	}
+}
+
+var skipAnchor = map[string]bool{
+	"intro":    true,
+	"Overview": true,
+}
+
+var skipTitle = map[string]bool{
+	"Acknowledgements":            true,
+	"Change Log":                  true,
+	"Document Organization":       true,
+	"Conventions and Terminology": true,
+}
+
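+// skipElement reports whether element s (and everything nested inside it)
+// should be excluded from coverage, per the skipAnchor and skipTitle
+// tables above and the artwork/changes.since rules below.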
+func skipElement(s xml.StartElement) bool {
+	switch s.Name.Local {
+	case "artwork":
+		return true
+	case "section":
+		for _, attr := range s.Attr {
+			switch attr.Name.Local {
+			case "anchor":
+				if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
+					return true
+				}
+			case "title":
+				if skipTitle[attr.Value] {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func readSpecCov(r io.Reader) specCoverage {
+	sc := specCoverage{
+		coverage: map[specPart]bool{},
+		d:        xml.NewDecoder(r)}
+	sc.readSection(nil)
+	return sc
+}
+
+func (sc specCoverage) addSentences(sec string, sentence string) {
+	for _, s := range parseSentences(sentence) {
+		sc.coverage[specPart{sec, s}] = false
+	}
+}
+
+func (sc specCoverage) cover(sec string, sentence string) {
+	for _, s := range parseSentences(sentence) {
+		p := specPart{sec, s}
+		if _, ok := sc.coverage[p]; !ok {
+			panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
+		}
+		sc.coverage[specPart{sec, s}] = true
+	}
+
+}
+
+var whitespaceRx = regexp.MustCompile(`\s+`)
+
+func parseSentences(sens string) []string {
+	sens = strings.TrimSpace(sens)
+	if sens == "" {
+		return nil
+	}
+	ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
+	for i, s := range ss {
+		s = strings.TrimSpace(s)
+		if !strings.HasSuffix(s, ".") {
+			s += "."
+		}
+		ss[i] = s
+	}
+	return ss
+}
+
+func TestSpecParseSentences(t *testing.T) {
+	tests := []struct {
+		ss   string
+		want []string
+	}{
+		{"Sentence 1. Sentence 2.",
+			[]string{
+				"Sentence 1.",
+				"Sentence 2.",
+			}},
+		{"Sentence 1. \nSentence 2.\tSentence 3.",
+			[]string{
+				"Sentence 1.",
+				"Sentence 2.",
+				"Sentence 3.",
+			}},
+	}
+
+	for i, tt := range tests {
+		got := parseSentences(tt.ss)
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("%d: got = %q, want %q", i, got, tt.want)
+		}
+	}
+}
+
+func TestSpecCoverage(t *testing.T) {
+	if !*coverSpec {
+		t.Skip()
+	}
+
+	loadSpecOnce.Do(loadSpec)
+
+	var (
+		list     []specPart
+		cv       = defaultSpecCoverage.coverage
+		total    = len(cv)
+		complete = 0
+	)
+
+	for sp, touched := range defaultSpecCoverage.coverage {
+		if touched {
+			complete++
+		} else {
+			list = append(list, sp)
+		}
+	}
+	sort.Stable(bySpecSection(list))
+
+	if testing.Short() && len(list) > 5 {
+		list = list[:5]
+	}
+
+	for _, p := range list {
+		t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
+	}
+
+	t.Logf("%d/%d (%d%%) sentences covered", complete, total, complete*100/total)
+}
+
+func attrSig(se xml.StartElement) string {
+	var names []string
+	for _, attr := range se.Attr {
+		if attr.Name.Local == "fmt" {
+			names = append(names, "fmt-"+attr.Value)
+		} else {
+			names = append(names, attr.Name.Local)
+		}
+	}
+	sort.Strings(names)
+	return strings.Join(names, ",")
+}
+
+func attrValue(se xml.StartElement, attr string) string {
+	for _, a := range se.Attr {
+		if a.Name.Local == attr {
+			return a.Value
+		}
+	}
+	panic("unknown attribute " + attr)
+}
+
+func TestSpecPartLess(t *testing.T) {
+	tests := []struct {
+		sec1, sec2 string
+		want       bool
+	}{
+		{"6.2.1", "6.2", false},
+		{"6.2", "6.2.1", true},
+		{"6.10", "6.10.1", true},
+		{"6.10", "6.1.1", false}, // 10, not 1
+		{"6.1", "6.1", false},    // equal, so not less
+	}
+	for _, tt := range tests {
+		got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
+		if got != tt.want {
+			t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
new file mode
100644 index 000000000..f1f06564a --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto + make diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go new file mode 100644 index 000000000..49ce01f4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go @@ -0,0 +1,2156 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) +func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls 
:= len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). + // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, 
b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) +func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. +type fakeMarshaler struct { + b []byte + err error +} + +func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } +func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } +func (f *fakeMarshaler) ProtoMessage() {} +func (f *fakeMarshaler) Reset() {} + +type msgWithFakeMarshaler struct { + M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` +} + +func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } +func (m *msgWithFakeMarshaler) ProtoMessage() {} +func (m *msgWithFakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + wantErr error + }{ + { + name: "Marshaler that fails", + m: &fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since there's an error, nothing should be written to buffer. 
+ want: nil, + wantErr: errors.New("some marshal err"), + }, + { + name: "Marshaler that fails with RequiredNotSetError", + m: &msgWithFakeMarshaler{ + M: &fakeMarshaler{ + err: &RequiredNotSetError{}, + b: []byte{5, 6, 7}, + }, + }, + // Since there's an error that can be continued after, + // the buffer should be written. + want: []byte{ + 10, 3, // for &msgWithFakeMarshaler + 5, 6, 7, // for &fakeMarshaler + }, + wantErr: &RequiredNotSetError{}, + }, + { + name: "Marshaler that succeeds", + m: &fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if _, ok := err.(*RequiredNotSetError); ok { + // We're not in package proto, so we can only assert the type in this case. + err = &RequiredNotSetError{} + } + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. 
+func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? +func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
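+// In the expected hex, each field starts with a varint-encoded key equal
+// to (field_number<<3 | wire_type): e.g. "0807" is key 0x08 = 1<<3|0
+// (field 1, varint) with value 7, and "c00201" is key 0xc0 0x02
+// (varint 320 = 40<<3|0, i.e. field 40, varint) with value 1.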
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + "91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 
42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string "big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, 
encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. +// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
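A note for readers tracing the overify vectors above: every chunk begins with a varint key equal to (field_number << 3) | wire_type, which is why field 1 (wire type 0, varint) carrying the value 7 opens each vector as "0807", and field 15 appears as "78" (15<<3 = 120 = 0x78). A minimal standalone sketch of that key arithmetic, illustrative only and not part of the vendored files:

package main

import "fmt"

// tagByte builds the one-byte protobuf key used for small field
// numbers: (field number << 3) | wire type. Wire type 0 is varint.
func tagByte(field, wire int) byte { return byte(field<<3 | wire) }

func main() {
	// Field 1, wire type 0, value 7 -> the "0807" prefix shared by
	// every test vector above.
	fmt.Printf("%02x%02x\n", tagByte(1, 0), 7) // prints 0807
}

Field numbers above 15 need a multi-byte varint key, which is why higher-numbered fields in the vectors ("8001", "aa06", and so on) occupy two key bytes. The test that follows applies the same unrecognized-field bookkeeping to nested messages.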
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. +// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. 
+ igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? + t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. 
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + err := o.Marshal(pb) + if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. +func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if strings.Index(err.Error(), "Label") < 0 { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if strings.Index(err.Error(), "{Unknown}") < 0 { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + _, err := Marshal((*GoEnum)(nil)) + if err != ErrNil { + t.Errorf("Marshal: got err %v, want ErrNil", err) + } +} + +// A type that implements the Marshaler interface, but is not nillable. 
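The Marshaler interface exercised by the next two fixtures is the package's self-serialization hook: a message whose Marshal() ([]byte, error) method exists is encoded by calling that method rather than by reflection. A small sketch of that dispatch with toy types (names hypothetical, not part of the vendored files):

package main

import "fmt"

// marshaler mirrors the shape of the proto package's Marshaler
// interface: values that can serialize themselves.
type marshaler interface {
	Marshal() ([]byte, error)
}

// fixedByte is a toy self-marshaling value.
type fixedByte byte

func (f fixedByte) Marshal() ([]byte, error) { return []byte{byte(f)}, nil }

// encode prefers self-marshaling, the same check the vendored
// encoder performs before falling back to its reflection path.
func encode(v interface{}) ([]byte, error) {
	if m, ok := v.(marshaler); ok {
		return m.Marshal()
	}
	return nil, fmt.Errorf("no encoder for %T", v)
}

func main() {
	b, _ := encode(fixedByte(7))
	fmt.Printf("% x\n", b) // 07
}

The distinction the fixtures below draw is whether such a field can also be nil: a value type like the uint64-based one that follows can never be nil, while a pointer-based implementation can, and TestNilMarshaler checks both paths.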
+type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. + F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. 
+ m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. + b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. 
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by 
recreating the original buffer. + pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +func TestMapFieldMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // b should be the concatenation of these three byte sequences in some order. + parts := []string{ + "\n\a\b\x01\x12\x03Rob", + "\n\a\b\x04\x12\x03Ian", + "\n\b\b\x08\x12\x04Dave", + } + ok := false + for i := range parts { + for j := range parts { + if j == i { + continue + } + for k := range parts { + if k == i || k == j { + continue + } + try := parts[i] + parts[j] + parts[k] + if bytes.Equal(b, []byte(try)) { + ok = true + break + } + } + } + } + if !ok { + t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) + } + t.Logf("FYI b: %q", b) + + (new(Buffer)).DebugPrint("Dump of b", b) +} + +func TestMapFieldRoundTrips(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + MsgMapping: map[int64]*FloatingPoint{ + 0x7001: &FloatingPoint{F: Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{ + false: []byte("that's not right!"), + true: []byte("aye, 'tis true!"), + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("FYI b: %q", b) + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + for _, pair := range [][2]interface{}{ + {m.NameMapping, m2.NameMapping}, + {m.MsgMapping, m2.MsgMapping}, + {m.ByteMapping, m2.ByteMapping}, + } { + if !reflect.DeepEqual(pair[0], pair[1]) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) + } + } +} + +func TestMapFieldWithNil(t *testing.T) { + m := &MessageWithMap{ + MsgMapping: map[int64]*FloatingPoint{ + 1: nil, + }, + } + b, err := Marshal(m) + if err == nil { + t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) + } +} + +func TestOneof(t *testing.T) { + m := &Communique{} + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal of empty message with oneof: %v", err) + } + if len(b) != 0 { + t.Errorf("Marshal of empty message yielded too many bytes: %v", b) + } + + m = &Communique{ + Union: &Communique_Name{"Barry"}, + } + + // Round-trip. 
+ b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof: %v", err) + } + if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) + t.Errorf("Incorrect marshal of message with oneof: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof: %v", err) + } + if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { + t.Errorf("After round trip, Union = %+v", m.Union) + } + if name := m.GetName(); name != "Barry" { + t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") + } + + // Let's try with a message in the oneof. + m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof set to message: %v", err) + } + if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) + t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof set to message: %v", err) + } + ss, ok := m.Union.(*Communique_Msg) + if !ok || ss.Msg.GetStringField() != "deep deep string" { + t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. + pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) { + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) 
+} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 000000000..84bbaf433 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,223 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: MessageSet and RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. 
+// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extendableProto); ok { + emOut := out.Addr().Interface().(extendableProto) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go new file mode 100644 index 000000000..76720f18b --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go @@ -0,0 +1,267 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
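One detail of mergeAny above deserves a worked illustration: scalar bytes fields are copied with append([]byte{}, ...) rather than append([]byte(nil), ...), because appending zero elements to a nil slice yields nil, which would turn a set-but-empty field into an unset one. A standalone sketch, not part of the vendored files:

package main

import "fmt"

func main() {
	// Copy an empty source both ways; only the []byte{} base keeps
	// the result non-nil, preserving "set but empty" semantics.
	var src []byte // nil source
	viaNil := append([]byte(nil), src...)
	viaEmpty := append([]byte{}, src...)
	fmt.Println(viaNil == nil, viaEmpty == nil) // true false
}

The clone tests that follow pin these copy semantics down from the outside, including the deep-copy checks on Pet, Others, and RepBytes.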
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. + if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + }, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. 
+ { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, + // proto3 shouldn't merge zero values, + // in the same way that proto2 shouldn't merge nils. + { + src: &proto3pb.Message{ + Name: "Aaron", + Data: []byte(""), // zero value, but not nil + }, + dst: &proto3pb.Message{ + HeightInCm: 176, + Data: []byte("texas!"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + HeightInCm: 176, + Data: []byte("texas!"), + }, + }, + // Oneof fields should merge by assignment. + { + src: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + }, + // Oneof nil is the same as not set. + { + src: &pb.Communique{}, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 000000000..84866356c --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,863 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
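To make the shift loop in DecodeVarint concrete: each byte contributes seven payload bits, least-significant group first, and a clear high bit terminates the sequence. The "78a019" chunk seen throughout the test vectors earlier in this diff decodes its value bytes a0 19 to 3232 this way. A standalone sketch mirroring the loop (illustrative only, not the vendored code):

package main

import "fmt"

func main() {
	// Hand-decode the two-byte varint a0 19, as in the "78a019"
	// (field 15, value 3232) chunks of the test vectors.
	buf := []byte{0xa0, 0x19}
	var x uint64
	var shift uint
	for _, b := range buf {
		x |= uint64(b&0x7f) << shift
		shift += 7
		if b < 0x80 { // high bit clear: last byte
			break
		}
	}
	fmt.Println(x) // 3232 = 0x20 + 0x19<<7
}

The fixed-width decoders that follow need no such loop; they read exactly four or eight little-endian bytes.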
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
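The zigzag expressions above, e.g. (x >> 1) ^ uint64((int64(x&1)<<63)>>63), undo the mapping 0, -1, 1, -2, ... -> 0, 1, 2, 3, ... that keeps small negative sints small on the wire; it is why the test vectors carry -32 as 0x3f and -64 as 0x7f. A standalone round-trip sketch (illustrative only, not the vendored code):

package main

import "fmt"

func main() {
	// Zigzag encode/decode for 64-bit values.
	enc := func(n int64) uint64 { return uint64(n<<1) ^ uint64(n>>63) }
	dec := func(x uint64) int64 { return int64(x>>1) ^ -int64(x&1) }
	for _, n := range []int64{0, -1, 1, -32, -64} {
		x := enc(n)
		fmt.Printf("%d -> %#x -> %d\n", n, x, dec(x))
	}
}

The skip path described next leans on these same primitives to consume a field of any wire type without interpreting it.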
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
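+// Each iteration reads one key varint u and splits it into a tag (u >> 3) and
+// a wire type (u & 0x7); the key 0x08, for instance, selects tag 1 with wire
+// type 0 (varint).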
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + e.ExtensionMap()[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. 
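+// On the wire a bool is a one-byte varint: 0x01 for true, 0x00 for false.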
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + + y := *v + for i := 0; i < nb; i++ { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
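+// In packed format all elements share a single key: the payload is a byte
+// count followed by the concatenated varints. For example, []int32{1, 300}
+// is encoded as the length byte 0x03 followed by 0x01 0xAC 0x02.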
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. 
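+ // For instance, a varint key field carries tagcode 0x08 (tag 1, wire
+ // type 0) and a length-delimited value field carries tagcode 0x12
+ // (tag 2, wire type 2).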
+ tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 000000000..89d0caa82 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1336 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
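+// For example, 300 is written as the two bytes 0xAC 0x02: seven payload bits
+// per byte, least-significant group first, with the high bit set on every
+// byte except the last.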
+func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. 
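+// An empty message is therefore written as the single length byte 0x00.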
+func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. 
+// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? 
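+ // If so, its self-produced bytes are framed as an ordinary
+ // length-delimited field: the key, then the byte count, then the data.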
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
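+ // The field key has already been appended; EncodeRawBytes now adds the
+ // varint byte count followed by the payload itself.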
+ o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? 
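+ // Each entry below is framed like a nested message; for example, the
+ // map[int32]string pair {1: "a"} produces the entry bytes
+ // 0x08 0x01 0x12 0x01 0x61.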
+ + /* + A map defined as + map<key_type, value_type> map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + // TODO: This could be faster and use less reflection. + if prop.oneofMarshaler != nil { + sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem() + for i := 0; i < prop.stype.NumField(); i++ { + fv := sv.Field(i) + if fv.Kind() != reflect.Interface || fv.IsNil() { + continue + } + if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" { + continue + } + spv := fv.Elem() // interface -> *T + sv := spv.Elem() // *T -> T + sf := sv.Type().Field(0) // StructField inside T + var prop Properties + prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf) + n += prop.size(&prop, toStructPointer(spv)) + } + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. 
If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 000000000..5475c3d95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,267 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. +// TODO: MessageSet. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. 
+ - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +func equalAny(v1, v2 reflect.Value) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. 
+ return false + } + if !equalAny(v1.MapIndex(key), val2) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem()) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i)) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go new file mode 100644 index 000000000..0e0db8a06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go @@ -0,0 +1,209 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + . "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// Four identical base messages. +// The init function adds extensions to some of them. +var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} + +// Two messages with non-message extensions. +var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} +var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} + +func init() { + ext1 := &pb.Ext{Data: String("Kirk")} + ext2 := &pb.Ext{Data: String("Picard")} + + // messageWithExtension1a has ext1, but never marshals it. + if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1a failed: " + err.Error()) + } + + // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. + if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1b failed: " + err.Error()) + } + buf, err := Marshal(messageWithExtension1b) + if err != nil { + panic("Marshal of 1b failed: " + err.Error()) + } + messageWithExtension1b.Reset() + if err := Unmarshal(buf, messageWithExtension1b); err != nil { + panic("Unmarshal of 1b failed: " + err.Error()) + } + + // messageWithExtension2 has ext2. 
+ if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
+ panic("SetExtension on 2 failed: " + err.Error())
+ }
+
+ if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
+ panic("SetExtension on Int32-1 failed: " + err.Error())
+ }
+ if err := SetExtension(messageWithInt32Extension2, pb.E_Ext_Number, Int32(24)); err != nil {
+ panic("SetExtension on Int32-2 failed: " + err.Error())
+ }
+}
+
+var EqualTests = []struct {
+ desc string
+ a, b Message
+ exp bool
+}{
+ {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
+ {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
+ {"nil vs nil", nil, nil, true},
+ {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
+ {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
+ {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
+
+ {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
+ {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
+ {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
+ {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
+
+ {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
+ {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
+ {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
+ {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
+ {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
+ {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
+ {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
+
+ {
+ "nested, different",
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
+ false,
+ },
+ {
+ "nested, equal",
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
+ true,
+ },
+
+ {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
+ {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
+ {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
+ {
+ "repeated bytes",
+ &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
+ &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
+ true,
+ },
+
+ {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
+ {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
+ {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
+
+ {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
+ {"int32 extension vs. 
a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + + { + "message with group", + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + true, + }, + + { + "map same", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + true, + }, + { + "map different entry", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, + false, + }, + { + "map different key only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, + false, + }, + { + "map different value only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, + false, + }, + { + "oneof same", + &pb.Communique{Union: &pb.Communique_Number{41}}, + &pb.Communique{Union: &pb.Communique_Number{41}}, + true, + }, + { + "oneof one nil", + &pb.Communique{Union: &pb.Communique_Number{41}}, + &pb.Communique{}, + false, + }, + { + "oneof different", + &pb.Communique{Union: &pb.Communique_Number{41}}, + &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, + false, + }, +} + +func TestEqual(t *testing.T) { + for _, tc := range EqualTests { + if res := Equal(tc.a, tc.b); res != tc.exp { + t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..e591ccef7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,400 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + base.ExtensionMap()[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
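+// Properties computed for an extension are cached in extProp below, keyed by
+// this pair, so each extension is only analyzed once.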
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + _, ok := pb.ExtensionMap()[extension.Field] + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + delete(pb.ExtensionMap(), extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + emap := pb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. 
+ return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + rep := extension.repeated() + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if !rep || o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. 
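+//
+// A minimal usage sketch; pb.MyMessage, pb.E_Ext_More and pb.Ext are the
+// generated testdata types used by this package's tests:
+//
+// msg := &pb.MyMessage{Count: proto.Int32(4)}
+// if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{Data: proto.String("hi")}); err != nil {
+// // handle the error
+// }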
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go new file mode 100644 index 000000000..72552767d --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go @@ -0,0 +1,292 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", ext1) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + t.Fatalf("Could not set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) + } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestGetExtensionDefaults(t *testing.T) { + var setFloat64 float64 = 1 + var setFloat32 float32 = 2 + var setInt32 int32 = 3 + var setInt64 int64 = 4 + var setUint32 uint32 = 5 + var setUint64 uint64 = 6 + var setBool = true + var setBool2 = false + var setString = "Goodnight string" + var setBytes = []byte("Goodnight bytes") + var setEnum = pb.DefaultsMessage_TWO + + type testcase struct { + ext *proto.ExtensionDesc // Extension we are testing. + want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). + def interface{} // Expected value of extension after ClearExtension(). 
+ }
+ tests := []testcase{
+ {pb.E_NoDefaultDouble, setFloat64, nil},
+ {pb.E_NoDefaultFloat, setFloat32, nil},
+ {pb.E_NoDefaultInt32, setInt32, nil},
+ {pb.E_NoDefaultInt64, setInt64, nil},
+ {pb.E_NoDefaultUint32, setUint32, nil},
+ {pb.E_NoDefaultUint64, setUint64, nil},
+ {pb.E_NoDefaultSint32, setInt32, nil},
+ {pb.E_NoDefaultSint64, setInt64, nil},
+ {pb.E_NoDefaultFixed32, setUint32, nil},
+ {pb.E_NoDefaultFixed64, setUint64, nil},
+ {pb.E_NoDefaultSfixed32, setInt32, nil},
+ {pb.E_NoDefaultSfixed64, setInt64, nil},
+ {pb.E_NoDefaultBool, setBool, nil},
+ {pb.E_NoDefaultBool, setBool2, nil},
+ {pb.E_NoDefaultString, setString, nil},
+ {pb.E_NoDefaultBytes, setBytes, nil},
+ {pb.E_NoDefaultEnum, setEnum, nil},
+ {pb.E_DefaultDouble, setFloat64, float64(3.1415)},
+ {pb.E_DefaultFloat, setFloat32, float32(3.14)},
+ {pb.E_DefaultInt32, setInt32, int32(42)},
+ {pb.E_DefaultInt64, setInt64, int64(43)},
+ {pb.E_DefaultUint32, setUint32, uint32(44)},
+ {pb.E_DefaultUint64, setUint64, uint64(45)},
+ {pb.E_DefaultSint32, setInt32, int32(46)},
+ {pb.E_DefaultSint64, setInt64, int64(47)},
+ {pb.E_DefaultFixed32, setUint32, uint32(48)},
+ {pb.E_DefaultFixed64, setUint64, uint64(49)},
+ {pb.E_DefaultSfixed32, setInt32, int32(50)},
+ {pb.E_DefaultSfixed64, setInt64, int64(51)},
+ {pb.E_DefaultBool, setBool, true},
+ {pb.E_DefaultBool, setBool2, true},
+ {pb.E_DefaultString, setString, "Hello, string"},
+ {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
+ {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
+ }
+
+ checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
+ val, err := proto.GetExtension(msg, test.ext)
+ if err != nil {
+ if valWant != nil {
+ return fmt.Errorf("GetExtension(): %s", err)
+ }
+ if want := proto.ErrMissingExtension; err != want {
+ return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
+ }
+ return nil
+ }
+
+ // All proto2 extension values are either a pointer to a value or a slice of values.
+ ty := reflect.TypeOf(val)
+ tyWant := reflect.TypeOf(test.ext.ExtensionType)
+ if got, want := ty, tyWant; got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
+ }
+ tye := ty.Elem()
+ tyeWant := tyWant.Elem()
+ if got, want := tye, tyeWant; got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
+ }
+
+ // Check the name of the type of the value.
+ // If it is an enum it will be type int32 with the name of the enum.
+ if got, want := tye.Name(), tyeWant.Name(); got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
+ }
+
+ // Check that value is what we expect.
+ // If we have a pointer in val, get the value it points to.
+ valExp := val
+ if ty.Kind() == reflect.Ptr {
+ valExp = reflect.ValueOf(val).Elem().Interface()
+ }
+ if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
+ }
+
+ return nil
+ }
+
+ setTo := func(test testcase) interface{} {
+ setTo := reflect.ValueOf(test.want)
+ if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
+ setTo = reflect.New(typ).Elem()
+ setTo.Set(reflect.New(setTo.Type().Elem()))
+ setTo.Elem().Set(reflect.ValueOf(test.want))
+ }
+ return setTo.Interface()
+ }
+
+ for _, test := range tests {
+ msg := &pb.DefaultsMessage{}
+ name := test.ext.Name
+
+ // Check the initial value.
+ if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + + // Set the per-type value and check value. + name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) + if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { + t.Errorf("%s: SetExtension(): %v", name, err) + continue + } + if err := checkVal(test, msg, test.want); err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Set and check the value. + name += " (cleared)" + proto.ClearExtension(msg, test.ext) + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} + +func TestNilExtension(t *testing.T) { + msg := &pb.MyMessage{ + Count: proto.Int32(1), + } + if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { + t.Fatal(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { + t.Error("expected SetExtension to fail due to a nil extension") + } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + t.Errorf("expected error %v, got %v", want, err) + } + // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update + // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 000000000..abbf7583e --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,883 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + +package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := 
proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
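+//
+// For example (m.F being an illustrative optional int64 field):
+// m.F = proto.Int64(42)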
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
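+//
+// A sketch of a typical call, where data holds wire-format bytes:
+//
+// var buf proto.Buffer
+// buf.DebugPrint("my message", data)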
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
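+ // For a scalar field of type *T, fptr has type **T: allocate a fresh T,
+ // fill it with the declared default when one exists, and point the
+ // field at it.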
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
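+// The caller (buildDefaultMessage above) fills in sf.index afterwards.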
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
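+ // Bool and string keys fall through to the textual default above.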
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 000000000..9d912bce1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,287 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. 
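+// Generated types that may be stored in a MessageSet supply that ID via the
+// messageTypeIder interface defined below.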
+var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and MessageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. +// +// When a proto1 proto has a field that looks like: +// optional message info = 3; +// the protocol compiler produces a field in the generated struct that looks like: +// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` +// The package is automatically inserted so there is no need for that proto file to +// import this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type MessageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure MessageSet is a Message. +var _ Message = (*MessageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *MessageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *MessageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *MessageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return ErrNoMessageTypeId + } + return nil // TODO: return error instead? +} + +func (ms *MessageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return ErrNoMessageTypeId + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *MessageSet) Reset() { *ms = MessageSet{} } +func (ms *MessageSet) String() string { return CompactTextString(ms) } +func (*MessageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. 
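+		// e.enc holds the complete encoded field: a tag varint
+		// ((field<<3)|WireBytes), a length varint, then the message bytes.
+		// Two skipVarint calls strip the tag and the length, leaving just
+		// the embedded message for the MessageSet item.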
+ msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(MessageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
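+// A generated file would typically register each extension with a call of
+// the form (hypothetical names):
+//
+//	proto.RegisterMessageSetType((*MyExtension)(nil), 12345, "my.pkg.MyExtension")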
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go new file mode 100644 index 000000000..7c29bccf4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &MessageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + m := make(map[int32]Extension) + if err := UnmarshalMessageSet(b, m); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := m[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", m) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..749919d25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. 
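+	// A nil field therefore means "the struct value itself": return the
+	// dereferenced pointer instead of indexing into struct fields.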
+ if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. 
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
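+// Append reuses spare capacity via SetLen while len < cap, and only grows
+// the slice through reflect.Append once it is full.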
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..e9be0fe92 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,266 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. 
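+// As with every accessor in this file, the address is computed as the
+// struct's base pointer plus the field's byte offset, then cast to the
+// concrete Go type.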
+func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. 
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..bbca4daf0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,805 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. 
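+ *
+ * This file also defines the Properties structures describing each struct
+ * field, which both the encoder and the decoder consult.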
+ */ + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. 
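+// For a field declared as `oneof value { string name = 4; }`, the compiler
+// emits a single-field wrapper struct (e.g. *Message_Name) whose protobuf tag
+// is parsed into Prop (hypothetical field names for illustration).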
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+	def_uint64 uint64
+
+	enc           encoder
+	valEnc        valueEncoder // set for bool and numeric types only
+	field         field
+	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+	tagbuf        [8]byte
+	stype         reflect.Type      // set for struct types only
+	sprop         *StructProperties // set for struct types only
+	isMarshaler   bool
+	isUnmarshaler bool
+
+	mtype    reflect.Type // set for map types only
+	mkeyprop *Properties  // set for map types only
+	mvalprop *Properties  // set for map types only
+
+	size    sizer
+	valSize valueSizer // set for bool and numeric types only
+
+	dec    decoder
+	valDec valueDecoder // set for bool and numeric types only
+
+	// If this is a packable field, this will be the decoder for the packed version of the field.
+	packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	if p.OrigName != p.Name {
+		s += ",name=" + p.OrigName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. 
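+			// For example, map[string]int32 values are encoded through *int32,
+			// whereas message (*T) and bytes ([]byte) values need no wrapping.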
+ vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
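+	// Without this early registration, a message that (transitively) contains
+	// itself would recurse through getPropertiesLocked forever.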
+ propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. 
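+//
+// For example, the generated proto3.pb.go vendored below registers its enum
+// in init with:
+//
+//	proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)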
+ +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 000000000..37c778209 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() 
*testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto new file mode 100644 index 000000000..e2311d929 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -0,0 +1,68 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
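An aside on the generated accessors shown above: protoc-gen-go emits getters that check the receiver for nil, so an accessor can be called on an unset (nil) message without panicking. A minimal sketch of that behavior; the program below is a hypothetical example for illustration and is not part of the vendored files:

package main

import (
	"fmt"

	pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	var m *pb.Message // deliberately nil
	// GetNested checks m != nil and returns nil rather than panicking,
	// so callers can use the accessor without guarding the receiver first.
	fmt.Println(m.GetNested()) // prints <nil>
}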
+
+syntax = "proto3";
+
+import "testdata/test.proto";
+
+package proto3_proto;
+
+message Message {
+ enum Humour {
+ UNKNOWN = 0;
+ PUNS = 1;
+ SLAPSTICK = 2;
+ BILL_BAILEY = 3;
+ }
+
+ string name = 1;
+ Humour hilarity = 2;
+ uint32 height_in_cm = 3;
+ bytes data = 4;
+ int64 result_count = 7;
+ bool true_scotsman = 8;
+ float score = 9;
+
+ repeated uint64 key = 5;
+ Nested nested = 6;
+
+ map<string, Nested> terrain = 10;
+ testdata.SubDefaults proto2_field = 11;
+ map<string, testdata.SubDefaults> proto2_value = 13;
+}
+
+message Nested {
+ string bunny = 1;
+}
+
+message MessageWithMap {
+ map<bool, bytes> byte_mapping = 1;
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 000000000..462f8055c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,125 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
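Before the tests that follow, one note on how the map fields declared above are encoded: each map<key, value> entry travels on the wire as an embedded message whose key is field 1 and whose value is field 2, which is what the protobuf_key and protobuf_val struct tags in the generated code describe. A minimal round-trip sketch; the program below is a hypothetical example for illustration and is not part of the vendored files:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	m := &pb.Message{
		Terrain: map[string]*pb.Nested{"meadow": &pb.Nested{Bunny: "Monty"}},
	}
	b, err := proto.Marshal(m)
	if err != nil {
		log.Fatal(err)
	}
	// The single entry is framed as field 10, wire type 2 (tag byte 0x52),
	// wrapping a nested message: 0x0a 0x06 "meadow" (key = field 1), then
	// 0x12 ... (value = field 2, the encoded Nested message).
	fmt.Printf("% x\n", b)

	m2 := new(pb.Message)
	if err := proto.Unmarshal(b, m2); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m2.GetTerrain()["meadow"].Bunny) // Monty
}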
+
+package proto_test
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/proto/proto3_proto"
+ tpb "github.com/golang/protobuf/proto/testdata"
+)
+
+func TestProto3ZeroValues(t *testing.T) {
+ tests := []struct {
+ desc string
+ m proto.Message
+ }{
+ {"zero message", &pb.Message{}},
+ {"empty bytes field", &pb.Message{Data: []byte{}}},
+ }
+ for _, test := range tests {
+ b, err := proto.Marshal(test.m)
+ if err != nil {
+ t.Errorf("%s: proto.Marshal: %v", test.desc, err)
+ continue
+ }
+ if len(b) > 0 {
+ t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
+ }
+ }
+}
+
+func TestRoundTripProto3(t *testing.T) {
+ m := &pb.Message{
+ Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
+ Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
+ HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
+ Data: []byte("roboto"), // (2 | 4<<3): 0x22 0x06 "roboto"
+ ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
+ TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
+ Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
+
+ Key: []uint64{1, 0xdeadbeef},
+ Nested: &pb.Nested{
+ Bunny: "Monty",
+ },
+ }
+ t.Logf(" m: %v", m)
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ t.Logf(" b: %q", b)
+
+ m2 := new(pb.Message)
+ if err := proto.Unmarshal(b, m2); err != nil {
+ t.Fatalf("proto.Unmarshal: %v", err)
+ }
+ t.Logf("m2: %v", m2)
+
+ if !proto.Equal(m, m2) {
+ t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
+ }
+}
+
+func TestProto3SetDefaults(t *testing.T) {
+ in := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: new(tpb.SubDefaults),
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": new(tpb.SubDefaults),
+ },
+ }
+
+ got := proto.Clone(in).(*pb.Message)
+ proto.SetDefaults(got)
+
+ // There are no defaults in proto3. Everything should be the zero value, but
+ // we need to remember to set defaults for nested proto2 messages.
+ want := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": &tpb.SubDefaults{N: proto.Int64(7)},
+ },
+ }
+
+ if !proto.Equal(got, want) {
+ t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go
new file mode 100644
index 000000000..a2729c39a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. + testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go new file mode 100644 index 000000000..40c393ff2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go @@ -0,0 +1,147 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . 
"github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. + {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. + {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. 
+ {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, + + {"oneof not set", &pb.Communique{}}, + {"oneof zero int32", &pb.Communique{Union: &pb.Communique_Number{0}}}, + {"oneof int32", &pb.Communique{Union: &pb.Communique_Number{3}}}, + {"oneof string", &pb.Communique{Union: &pb.Communique_Name{"Rhythmic Fman"}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile new file mode 100644 index 000000000..fc288628a --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile @@ -0,0 +1,50 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f test.pb.go + make test.pb.go + +# The following rules are just aids to development. Not needed for typical testing. + +diff: regenerate + git diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go new file mode 100644 index 000000000..7172d0e96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. 
+func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. + newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go new file mode 100644 index 000000000..7d308086e --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go @@ -0,0 +1,2948 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + MyMessage + Ext + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap + Communique +*/ +package testdata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +// An enum, for completeness. 
+type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + 
return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. 
+ RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" 
json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). 
+ F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && 
m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m 
*GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func 
(*GoSkipTest_SkipGroup) ProtoMessage() {} + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. + LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
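+ // (It stays wire compatible because both encode field 2 as a varint.
+ // Illustrative sketch only, not generated code:)
+ //
+ //   buf, _ := proto.Marshal(&OldMessage{Num: proto.Int32(7)})
+ //   var nm NewMessage
+ //   _ = proto.Unmarshal(buf, &nm) // nm.GetNum() is 7
+ //
+ // (The packed/non-packed claim for NonPackedTest/PackedTest above can be
+ // sketched the same way: the decoder accepts either encoding of field 1.)
+ //
+ //   buf, _ = proto.Marshal(&PackedTest{B: []int32{1, 2, 3}})
+ //   var np NonPackedTest
+ //   _ = proto.Unmarshal(buf, &np) // np.GetA() is []int32{1, 2, 3}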
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" 
json:"others,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. + RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + 
ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type DefaultsMessage struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} +func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type MyMessageSet struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func 
(m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. 
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) 
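+ // Note the defensive copy in the return above: handing out
+ // Default_Defaults_F_Bytes directly would let a caller mutate the shared
+ // package-level default. A sketch of the getter contract (illustrative,
+ // not generated code):
+ //
+ //   d := new(Defaults)
+ //   _ = d.GetF_Int32()  // 32, from Default_Defaults_F_Int32
+ //   b := d.GetF_Bytes() // a fresh copy of []byte("Bignose")
+ //   b[0] = 'X'          // safe: only the copy changes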
+} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) 
GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt,name=G" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt,name=G" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return 
m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + } + return nil +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Col + // *Communique_Msg + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,oneof"` +} +type Communique_Col struct { + Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` +} +type Communique_Msg struct { + Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Col) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetCol() MyMessage_Color { + if x, ok := m.GetUnion().(*Communique_Col); ok { + return x.Col + } + return MyMessage_RED +} + +func (m *Communique) GetMsg() *Strings { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
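+ // (Illustrative sketch only, not generated code: exactly one of the
+ // wrapper types occupies Union at a time, and each accessor above
+ // type-asserts against it.)
+ //
+ //   c := &Communique{Union: &Communique_Number{42}}
+ //   _ = c.GetNumber() // 42
+ //   _ = c.GetName()   // "", since Union holds a Communique_Number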
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Col)(nil), + (*Communique_Msg)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Col: + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Col)) + case *Communique_Msg: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.col + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Col{MyMessage_Color(x)} + return true, err + case 10: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Strings) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + default: + return false, nil + } +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: 
"varint,103,opt,name=no_default_int32", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,def=3.1415", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,def=3.14", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + 
ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,def=42", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,def=43", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,def=44", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,def=45", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,def=46", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,def=47", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,def=48", +} + +var E_DefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,def=49", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,def=50", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,def=51", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,def=1", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,def=Hello, string", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", +} + +var 
E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: "bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + 
Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + 
proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto new file mode 100644 index 000000000..3d1cbb675 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto @@ -0,0 +1,494 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. + required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted 
= 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
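+ // (On the wire, each repeated bytes element is a separate length-delimited value.)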
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes = 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 
245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; +} + +message MessageWithMap { + map<int32, string> name_mapping = 1; + map<int64, FloatingPoint> msg_mapping = 2; + map<bool, bytes> byte_mapping = 3; + map<string, string> str_to_str = 4; +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union". + oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + MyMessage.Color col = 9; + Strings msg = 10; + } +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go new file mode 100644 index 000000000..dd69110f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,796 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
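+ // (Delegating to Write keeps the newline and indentation bookkeeping in one place, at the cost of one []byte conversion.)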
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +var ( + messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() +) + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + if sv.Type() == messageSetType { + return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) + } + + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("<nil>\n")); err != nil { + return err + } + continue + } + if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields.
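+ // The keys are sorted below so that the text output is deterministic; Go randomizes map iteration order.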
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props.Parse(tag) // Overwrite the outer props. + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
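+// It prints b between angle brackets using writeUnknownStruct.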
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeMessageSet(w *textWriter, ms *MessageSet) error { + for _, item := range ms.Item { + id := *item.TypeId + if msd, ok := messageSetMap[id]; ok { + // Known message set type. 
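+ // Unmarshal the item into its registered Go type and print it as a nested struct under the type name.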
+ if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { + return err + } + w.indent() + + pb := reflect.New(msd.t.Elem()) + if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { + if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { + return err + } + } else { + if err := writeStruct(w, pb.Elem()); err != nil { + return err + } + } + } else { + // Unknown type. + if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { + return err + } + w.indent() + if err := writeUnknownStruct(w, item.Message); err != nil { + return err + } + } + w.unindent() + if _, err := w.Write(gtNewline); err != nil { + return err + } + } + return nil +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m := ep.ExtensionMap() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
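+ // A singular extension is written once; a repeated one is unrolled element by element via reflection.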
+ if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("<nil>")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 000000000..8bce36920 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,784 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc.
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', 
',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. 
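+// Only one token of lookahead is available: a second back() before the following next() has no further effect.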
+func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). 
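+ // The next token should be the extension's fully-qualified name, e.g. "testdata.Ext.more".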
+ tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
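+ // (fieldSet records every field name parsed so far; only non-repeated fields may not appear twice.)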
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. May already exist. + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(at, flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. 
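+ // For now every nested message is parsed field by field with readStruct.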
+ return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go new file mode 100644 index 000000000..a2a960447 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go @@ -0,0 +1,523 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + . 
"github.com/golang/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. 
+ { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + 
Name: String("Ezekiel"), + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. + if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic.
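+// It checks that an enum value parses into a repeated enum field without crashing.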
+func TestRepeatedEnum(t *testing.T) {
+	pb := new(RepeatedEnum)
+	if err := UnmarshalText("color: RED", pb); err != nil {
+		t.Fatal(err)
+	}
+	exp := &RepeatedEnum{
+		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+	}
+	if !Equal(pb, exp) {
+		t.Errorf("Incorrectly populated.\nHave: %v\nWant: %v", pb, exp)
+	}
+}
+
+func TestProto3TextParsing(t *testing.T) {
+	m := new(proto3pb.Message)
+	const in = `name: "Wallace" true_scotsman: true`
+	want := &proto3pb.Message{
+		Name:         "Wallace",
+		TrueScotsman: true,
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestMapParsing(t *testing.T) {
+	m := new(MessageWithMap)
+	const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
+		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
+		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
+		`byte_mapping:<key:true value:"so be it">`
+	want := &MessageWithMap{
+		NameMapping: map[int32]string{
+			1:    "Beatles",
+			1234: "Feist",
+		},
+		MsgMapping: map[int64]*FloatingPoint{
+			-4: {F: Float64(2.0)},
+			-2: {F: Float64(4.0)},
+		},
+		ByteMapping: map[bool][]byte{
+			true: []byte("so be it"),
+		},
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestOneofParsing(t *testing.T) {
+	const in = `name:"Shrek"`
+	m := new(Communique)
+	want := &Communique{Union: &Communique_Name{"Shrek"}}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+var benchInput string
+
+func init() {
+	benchInput = "count: 4\n"
+	for i := 0; i < 1000; i++ {
+		benchInput += "pet: \"fido\"\n"
+	}
+
+	// Check it is valid input.
+	pb := new(MyMessage)
+	err := UnmarshalText(benchInput, pb)
+	if err != nil {
+		panic("Bad benchmark input: " + err.Error())
+	}
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+	pb := new(MyMessage)
+	for i := 0; i < b.N; i++ {
+		UnmarshalText(benchInput, pb)
+	}
+	b.SetBytes(int64(len(benchInput)))
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 000000000..3eabacac8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,474 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. +type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. + b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. 
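+	// A protobuf field key is field_number<<3 | wire_type, so
+	// 202<<3|WireVarint declares field 202 with wire type 0 (varint);
+	// the appended byte 19 is that field's value.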
+	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+	proto.SetRawExtension(msg, 202, b)
+
+	return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+  host: "footrest.syd"
+  port: 7001
+  connected: true
+>
+others: <
+  key: 3735928559
+  value: "\001A\007\014"
+>
+others: <
+  weight: 6.022
+  inner: <
+    host: "lesha.mtv"
+    port: 8002
+  >
+>
+bikeshed: BLUE
+SomeGroup {
+  group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[testdata.Ext.more]: <
+  data: "Big gobs for big rats"
+>
+[testdata.greeting]: "adg"
+[testdata.greeting]: "easy"
+[testdata.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+	buf := new(bytes.Buffer)
+	if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+		t.Fatalf("proto.MarshalText: %v", err)
+	}
+	s := buf.String()
+	if s != text {
+		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+	}
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+	buf := new(bytes.Buffer)
+	if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+		t.Fatalf("proto.MarshalText: %v", err)
+	}
+	s := buf.String()
+	if s != "custom" {
+		t.Errorf("Got %q, expected %q", s, "custom")
+	}
+}
+func TestMarshalTextNil(t *testing.T) {
+	want := ""
+	tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+	for i, test := range tests {
+		buf := new(bytes.Buffer)
+		if err := proto.MarshalText(buf, test); err != nil {
+			t.Fatal(err)
+		}
+		if got := buf.String(); got != want {
+			t.Errorf("%d: got %q want %q", i, got, want)
+		}
+	}
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+	// The Color enum only specifies values 0-2.
+	m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+	got := m.String()
+	const want = `bikeshed:3 `
+	if got != want {
+		t.Errorf("\n got %q\nwant %q", got, want)
+	}
+}
+
+func TestTextOneof(t *testing.T) {
+	tests := []struct {
+		m    proto.Message
+		want string
+	}{
+		// zero message
+		{&pb.Communique{}, ``},
+		// scalar field
+		{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
+		// message field
+		{&pb.Communique{Union: &pb.Communique_Msg{
+			&pb.Strings{StringField: proto.String("why hello!")},
+		}}, `msg:<string_field:"why hello!" >`},
+		// bad oneof (should not panic)
+		{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
+	}
+	for _, test := range tests {
+		got := strings.TrimSpace(test.m.String())
+		if got != test.want {
+			t.Errorf("\n got %s\nwant %s", got, test.want)
+		}
+	}
+}
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+	buf := new(bytes.Buffer)
+	m := newTestMessage()
+	for i := 0; i < b.N; i++ {
+		buf.Reset()
+		proto.MarshalText(buf, m)
+	}
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+	w := ioutil.Discard
+	m := newTestMessage()
+	for i := 0; i < b.N; i++ {
+		proto.MarshalText(w, m)
+	}
+}
+
+func compact(src string) string {
+	// s/[ \n]+/ /g; s/ $//;
+	dst := make([]byte, len(src))
+	space, comment := false, false
+	j := 0
+	for i := 0; i < len(src); i++ {
+		if strings.HasPrefix(src[i:], "/*") {
+			comment = true
+			i++
+			continue
+		}
+		if comment && strings.HasPrefix(src[i:], "*/") {
+			comment = false
+			i++
+			continue
+		}
+		if comment {
+			continue
+		}
+		c := src[i]
+		if c == ' ' || c == '\n' {
+			space = true
+			continue
+		}
+		if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+			space = false
+		}
+		if c == '{' {
+			space = false
+		}
+		if space {
+			dst[j] = ' '
+			j++
+			space = false
+		}
+		dst[j] = c
+		j++
+	}
+	if
space {
+		dst[j] = ' '
+		j++
+	}
+	return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+	s := proto.CompactTextString(newTestMessage())
+	if s != compactText {
+		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+	}
+}
+
+func TestStringEscaping(t *testing.T) {
+	testCases := []struct {
+		in  *pb.Strings
+		out string
+	}{
+		{
+			// Test data from C++ test (TextFormatTest.StringEscape).
+			// Single divergence: we don't escape apostrophes.
+			&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+			"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+		},
+		{
+			// Test data from the same C++ test.
+			&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+			"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+		},
+		{
+			// Some UTF-8.
+			&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+			`string_field: "\000\001\377\201"` + "\n",
+		},
+	}
+
+	for i, tc := range testCases {
+		var buf bytes.Buffer
+		if err := proto.MarshalText(&buf, tc.in); err != nil {
+			t.Errorf("proto.MarshalText: %v", err)
+			continue
+		}
+		s := buf.String()
+		if s != tc.out {
+			t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+			continue
+		}
+
+		// Check round-trip.
+		pb := new(pb.Strings)
+		if err := proto.UnmarshalText(s, pb); err != nil {
+			t.Errorf("#%d: UnmarshalText: %v", i, err)
+			continue
+		}
+		if !proto.Equal(pb, tc.in) {
+			t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+		}
+	}
+}
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+	b     bytes.Buffer
+	limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+	var avail = w.limit - w.b.Len()
+	if avail <= 0 {
+		return 0, outOfSpace
+	}
+	if len(p) <= avail {
+		return w.b.Write(p)
+	}
+	n, _ = w.b.Write(p[:avail])
+	return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+	// Try lots of different sizes to exercise more error code-paths.
+	for lim := 0; lim < len(text); lim++ {
+		buf := new(limitedWriter)
+		buf.limit = lim
+		err := proto.MarshalText(buf, newTestMessage())
+		// We expect a certain error, but also some partial results in the buffer.
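+		// Writes past the limit are truncated at the limit, so the buffer
+		// must always hold an exact prefix of the full text output.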
+		if err != outOfSpace {
+			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+		}
+		s := buf.b.String()
+		x := text[:buf.limit]
+		if s != x {
+			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+		}
+	}
+}
+
+func TestFloats(t *testing.T) {
+	tests := []struct {
+		f    float64
+		want string
+	}{
+		{0, "0"},
+		{4.7, "4.7"},
+		{math.Inf(1), "inf"},
+		{math.Inf(-1), "-inf"},
+		{math.NaN(), "nan"},
+	}
+	for _, test := range tests {
+		msg := &pb.FloatingPoint{F: &test.f}
+		got := strings.TrimSpace(msg.String())
+		want := `f:` + test.want
+		if got != want {
+			t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+		}
+	}
+}
+
+func TestRepeatedNilText(t *testing.T) {
+	m := &pb.MessageList{
+		Message: []*pb.MessageList_Message{
+			nil,
+			&pb.MessageList_Message{
+				Name: proto.String("Horse"),
+			},
+			nil,
+		},
+	}
+	want := `Message <nil>
+Message {
+  name: "Horse"
+}
+Message <nil>
+`
+	if s := proto.MarshalTextString(m); s != want {
+		t.Errorf(" got: %s\nwant: %s", s, want)
+	}
+}
+
+func TestProto3Text(t *testing.T) {
+	tests := []struct {
+		m    proto.Message
+		want string
+	}{
+		// zero message
+		{&proto3pb.Message{}, ``},
+		// zero message except for an empty byte slice
+		{&proto3pb.Message{Data: []byte{}}, ``},
+		// trivial case
+		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+		// empty map
+		{&pb.MessageWithMap{}, ``},
+		// non-empty map; map format is the same as a repeated struct,
+		// and they are sorted by key (numerically for numeric keys).
+		{
+			&pb.MessageWithMap{NameMapping: map[int32]string{
+				-1:      "Negatory",
+				7:       "Lucky",
+				1234:    "Feist",
+				6345789: "Otis",
+			}},
+			`name_mapping:<key:-1 value:"Negatory" > ` +
+				`name_mapping:<key:7 value:"Lucky" > ` +
+				`name_mapping:<key:1234 value:"Feist" > ` +
+				`name_mapping:<key:6345789 value:"Otis" >`,
+		},
+		// map with nil value; not well-defined, but we shouldn't crash
+		{
+			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+			`msg_mapping:<key:7 >`,
+		},
+	}
+	for _, test := range tests {
+		got := strings.TrimSpace(test.m.String())
+		if got != test.want {
+			t.Errorf("\n got %s\nwant %s", got, test.want)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/.gitignore b/Godeps/_workspace/src/github.com/twinj/uuid/.gitignore
new file mode 100644
index 000000000..43ab61594
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/twinj/uuid/.gitignore
@@ -0,0 +1 @@
+uuid
diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/.travis.yml b/Godeps/_workspace/src/github.com/twinj/uuid/.travis.yml
new file mode 100644
index 000000000..4d553517f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/twinj/uuid/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+  - 1.4
+  - 1.3
+  - 1.2
+  - tip
+notifications:
+  email: true
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/LICENSE b/Godeps/_workspace/src/github.com/twinj/uuid/LICENSE
new file mode 100644
index 000000000..a92c1e08f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/twinj/uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2011 by Krzysztof Kowalik
+Copyright (C) 2014 by Daniel Kemp Derivative work
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice
shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/README.md b/Godeps/_workspace/src/github.com/twinj/uuid/README.md
new file mode 100644
index 000000000..40f9957d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/twinj/uuid/README.md
@@ -0,0 +1,86 @@
+Go UUID implementation
+========================
+
+[![Build Status](https://travis-ci.org/twinj/uuid.png?branch=master)](https://travis-ci.org/twinj/uuid)
+[![GoDoc](http://godoc.org/github.com/twinj/uuid?status.png)](http://godoc.org/github.com/twinj/uuid)
+
+This package provides RFC 4122 compliant UUIDs.
+It will generate the following:
+
+* Version 1: based on timestamp and MAC address
+* Version 3: based on MD5 hash
+* Version 4: based on cryptographically secure random numbers
+* Version 5: based on SHA-1 hash
+
+The functions NewV1, NewV3, NewV4 and NewV5 generate version 1, 3, 4
+and 5 UUIDs as specified in [RFC 4122](http://www.ietf.org/rfc/rfc4122.txt);
+New, NewHex and Parse create UUIDs from existing data.
+
+# Requirements
+
+Go 1.4, 1.3, 1.2 and tip supported.
+
+# Recent Changes
+
+* Removed use of OS Thread locking and runtime package requirement
+* Changed String() output to CleanHyphen to match the canonical standard
+* Plenty of minor changes and housekeeping
+* Removed the default saver and replaced it with an interface
+* API changes to simplify use
+* Added formatting support for user-defined formats
+* Added support for Google App Engine
+* Variant type bits are now set correctly
+* Variant type can now be retrieved more efficiently
+* New tests for variant setting to confirm correctness
+* New tests added to confirm proper version setting
+* Type UUID changed to UUIDArray for V3-5 UUIDs; UUIDStruct added for V1 UUIDs
+  * These implement the BinaryMarshaler and BinaryUnmarshaler interfaces
+* New was added to create a base UUID from a []byte slice - this uses UUIDArray
+* ParseHex was renamed to ParseUUID
+* NewHex now performs unsafe creation of a UUID from a hex string
+* NewV3 and NewV5 now take anything that implements the Stringer interface
+* V1 UUIDs can now be created
+* The printing format can be changed
+
+## Installation
+
+Use the `go` tool:
+
+    $ go get github.com/twinj/uuid
+
+## Usage
+
+See [documentation and examples](http://godoc.org/github.com/twinj/uuid)
+for more information.
+
+    var config = uuid.StateSaverConfig{SaveReport: true, SaveSchedule: 30 * time.Minute}
+    uuid.SetupFileSystemStateSaver(config)
+
+    u1 := uuid.NewV1()
+    uP, _ := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+    u3 := uuid.NewV3(uP, uuid.Name("test"))
+    u4 := uuid.NewV4()
+    fmt.Printf("version %d variant %x: %s\n", u4.Version(), u4.Variant(), u4)
+
+    u5 := uuid.NewV5(uuid.NamespaceURL, uuid.Name("test"))
+
+    if uuid.Equal(u1, u3) {
+        fmt.Printf("Will never happen")
+    }
+    fmt.Printf(uuid.Formatter(u5, uuid.CurlyHyphen))
+
+    uuid.SwitchFormat(uuid.BracketHyphen)
+
+## Copyright
+
+This is a derivative work.
+
+Original version from
+Copyright (C) 2011 by Krzysztof Kowalik .
+See [COPYING](https://github.com/nu7hatch/gouuid/tree/master/COPYING) +file for details. + +Also see: Algorithm details in [RFC 4122](http://www.ietf.org/rfc/rfc4122.txt). + +Copyright (C) 2014 twinj@github.com +See [LICENSE](https://github.com/twinj/uuid/tree/master/LICENSE) +file for details. diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/array.go b/Godeps/_workspace/src/github.com/twinj/uuid/array.go new file mode 100644 index 000000000..add7e19be --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/array.go @@ -0,0 +1,68 @@ +package uuid + +/**************** + * Date: 1/02/14 + * Time: 10:08 AM + ***************/ + +const ( + variantIndex = 8 + versionIndex = 6 +) + +// A clean UUID type for simpler UUID versions +type Array [length]byte + +func (Array) Size() int { + return length +} + +func (o Array) Version() int { + return int(o[versionIndex]) >> 4 +} + +func (o *Array) setVersion(pVersion int) { + o[versionIndex] &= 0x0F + o[versionIndex] |= byte(pVersion) << 4 +} + +func (o *Array) Variant() byte { + return variant(o[variantIndex]) +} + +func (o *Array) setVariant(pVariant byte) { + setVariant(&o[variantIndex], pVariant) +} + +func (o *Array) Unmarshal(pData []byte) { + copy(o[:], pData) +} + +func (o *Array) Bytes() []byte { + return o[:] +} + +func (o Array) String() string { + return formatter(&o, format) +} + +func (o Array) Format(pFormat string) string { + return formatter(&o, pFormat) +} + +// Set the three most significant bits (bits 0, 1 and 2) of the +// sequenceHiAndVariant equivalent in the array to ReservedRFC4122. +func (o *Array) setRFC4122Variant() { + o[variantIndex] &= 0x3F + o[variantIndex] |= ReservedRFC4122 +} + +// Marshals the UUID bytes into a slice +func (o *Array) MarshalBinary() ([]byte, error) { + return o.Bytes(), nil +} + +// Un-marshals the data bytes into the UUID. 
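+// Delegates to the package-level UnmarshalBinary, which rejects input
+// that is not exactly Size() bytes before copying it into the Array.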
+func (o *Array) UnmarshalBinary(pData []byte) error { + return UnmarshalBinary(o, pData) +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/array_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/array_test.go new file mode 100644 index 000000000..7b1f5c100 --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/array_test.go @@ -0,0 +1,31 @@ +package uuid + +/**************** + * Date: 15/02/14 + * Time: 12:49 PM + ***************/ + +import ( + "testing" +) + +var array_bytes = []byte{ + 0xAA, 0xCF, 0xEE, 0x12, + 0xD4, 0x00, + 0x27, 0x23, + 0x00, + 0xD3, + 0x23, 0x12, 0x4A, 0x11, 0x89, 0xFF, +} + +func TestUUID_Array_UnmarshalBinary(t *testing.T) { + u := new(Array) + err := u.UnmarshalBinary([]byte{1, 2, 3, 4, 5}) + if err == nil { + t.Errorf("Expected error due to invalid byte length") + } + err = u.UnmarshalBinary(array_bytes) + if err != nil { + t.Errorf("Expected bytes") + } +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/rfc4122.go b/Godeps/_workspace/src/github.com/twinj/uuid/rfc4122.go new file mode 100644 index 000000000..478f1e365 --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/rfc4122.go @@ -0,0 +1,146 @@ +package uuid + +/*************** + * Date: 14/02/14 + * Time: 7:44 PM + ***************/ + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "log" + seed "math/rand" + "net" +) + +const ( + length = 16 + + // 3F used by RFC4122 although 1F works for all + variantSet = 0x3F + + // rather than using 0xC0 we use 0xE0 to retrieve the variant + // The result is the same for all other variants + // 0x80 and 0xA0 are used to identify RFC4122 compliance + variantGet = 0xE0 +) + +var ( + // nodeID is the default Namespace node + nodeId = []byte{ + // 00.192.79.212.48.200 + 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8, + } + // The following standard UUIDs are for use with V3 or V5 UUIDs. + NamespaceDNS = &Struct{0x6ba7b810, 0x9dad, 0x11d1, 0x80, 0xb4, nodeId, length} + NamespaceURL = &Struct{0x6ba7b811, 0x9dad, 0x11d1, 0x80, 0xb4, nodeId, length} + NamespaceOID = &Struct{0x6ba7b812, 0x9dad, 0x11d1, 0x80, 0xb4, nodeId, length} + NamespaceX500 = &Struct{0x6ba7b814, 0x9dad, 0x11d1, 0x80, 0xb4, nodeId, length} + + state State +) + +func init() { + seed.Seed((int64(timestamp())^int64(gregorianToUNIXOffset))*0x6ba7b814<<0x6ba7b812 | 1391463463) + state = State{ + randomNode: true, + randomSequence: true, + past: Timestamp((1391463463 * 10000000) + (100 * 10) + gregorianToUNIXOffset), + node: nodeId, + sequence: uint16(seed.Int()) & 0x3FFF, + saver: nil, + } +} + +// NewV1 will generate a new RFC4122 version 1 UUID +func NewV1() UUID { + state.Lock() + now := currentUUIDTimestamp() + state.read(now, currentUUIDNodeId()) + state.persist() + state.Unlock() + return formatV1(now, uint16(1), ReservedRFC4122, state.node) +} + +// NewV3 will generate a new RFC4122 version 3 UUID +// V3 is based on the MD5 hash of a namespace identifier UUID and +// any type which implements the UniqueName interface for the name. +// For strings and slices cast to a Name type +func NewV3(pNs UUID, pName UniqueName) UUID { + o := new(Array) + // Set all bits to MD5 hash generated from namespace and name. + Digest(o, pNs, pName, md5.New()) + o.setRFC4122Variant() + o.setVersion(3) + return o +} + +// NewV4 will generate a new RFC4122 version 4 UUID +// A cryptographically secure random UUID. +func NewV4() UUID { + o := new(Array) + // Read random values (or pseudo-randomly) into Array type. 
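+	// crypto/rand reads from the operating system's CSPRNG; generation
+	// panics rather than return a predictable UUID if that source fails.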
+ _, err := rand.Read(o[:length]) + if err != nil { + panic(err) + } + o.setRFC4122Variant() + o.setVersion(4) + return o +} + +// NewV5 will generate a new RFC4122 version 5 UUID +// Generate a UUID based on the SHA-1 hash of a namespace +// identifier and a name. +func NewV5(pNs UUID, pName UniqueName) UUID { + o := new(Array) + Digest(o, pNs, pName, sha1.New()) + o.setRFC4122Variant() + o.setVersion(5) + return o +} + +// either generates a random node when there is an error or gets +// the pre initialised one +func currentUUIDNodeId() (node net.HardwareAddr) { + if state.randomNode { + b := make([]byte, 16+6) + _, err := rand.Read(b) + if err != nil { + log.Println("UUID.currentUUIDNodeId error:", err) + node = nodeId + return + } + h := sha1.New() + h.Write(b) + binary.Write(h, binary.LittleEndian, state.sequence) + node = h.Sum(nil)[:6] + if err != nil { + log.Println("UUID.currentUUIDNodeId error:", err) + node = nodeId + return + } + // Mark as randomly generated + node[0] |= 0x01 + } else { + node = state.node + } + return +} + +// Unmarshal data into struct for V1 UUIDs +func formatV1(pNow Timestamp, pVersion uint16, pVariant byte, pNode []byte) UUID { + o := new(Struct) + o.timeLow = uint32(pNow & 0xFFFFFFFF) + o.timeMid = uint16((pNow >> 32) & 0xFFFF) + o.timeHiAndVersion = uint16((pNow >> 48) & 0x0FFF) + o.timeHiAndVersion |= uint16(pVersion << 12) + o.sequenceLow = byte(state.sequence & 0xFF) + o.sequenceHiAndVariant = byte((state.sequence & 0x3F00) >> 8) + o.sequenceHiAndVariant |= pVariant + o.node = pNode + o.size = length + return o +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/rfc4122_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/rfc4122_test.go new file mode 100644 index 000000000..432900c4e --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/rfc4122_test.go @@ -0,0 +1,209 @@ +package uuid + +/**************** + * Date: 16/02/14 + * Time: 11:29 AM + ***************/ + +import ( + "fmt" + "net/url" + "testing" +) + +var ( + goLang Name = "https://google.com/golang.org?q=golang" +) + +const ( + generate = 1000000 +) + +func TestUUID_NewV1(t *testing.T) { + u := NewV1() + if u.Version() != 1 { + t.Errorf("Expected correct version %d, but got %d", 1, u.Version()) + } + if u.Variant() != ReservedRFC4122 { + t.Errorf("Expected RFC4122 variant %x, but got %x", ReservedRFC4122, u.Variant()) + } + if !parseUUIDRegex.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given: %s", u.String()) + } +} + +func TestUUID_NewV1Bulk(t *testing.T) { + for i := 0; i < generate; i++ { + NewV1() + } +} + +// Tests NewV3 +func TestUUID_NewV3(t *testing.T) { + u := NewV3(NamespaceURL, goLang) + if u.Version() != 3 { + t.Errorf("Expected correct version %d, but got %d", 3, u.Version()) + } + if u.Variant() != ReservedRFC4122 { + t.Errorf("Expected RFC4122 variant %x, but got %x", ReservedRFC4122, u.Variant()) + } + if !parseUUIDRegex.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given: %s", u.String()) + } + ur, _ := url.Parse(string(goLang)) + + // Same NS same name MUST be equal + u2 := NewV3(NamespaceURL, ur) + if !Equal(u2, u) { + t.Errorf("Expected UUIDs generated with same namespace and name to equal but got: %s and %s", u2, u) + } + + // Different NS same name MUST NOT be equal + u3 := NewV3(NamespaceDNS, ur) + if Equal(u3, u) { + t.Errorf("Expected UUIDs generated with different namespace and same name to be different but got: %s and %s", u3, u) + } + + // Same NS different name MUST NOT 
be equal + u4 := NewV3(NamespaceURL, u) + if Equal(u4, u) { + t.Errorf("Expected UUIDs generated with the same namespace and different names to be different but got: %s and %s", u4, u) + } + + ids := []UUID{ + u, u2, u3, u4, + } + for j, id := range ids { + i := NewV3(NamespaceURL, NewName(string(j), id)) + if Equal(id, i) { + t.Errorf("Expected UUIDs generated with the same namespace and different names to be different but got: %s and %s", id, i) + } + } +} + +func TestUUID_NewV3Bulk(t *testing.T) { + for i := 0; i < generate; i++ { + NewV3(NamespaceDNS, goLang) + } +} + +func TestUUID_NewV4(t *testing.T) { + u := NewV4() + if u.Version() != 4 { + t.Errorf("Expected correct version %d, but got %d", 4, u.Version()) + } + if u.Variant() != ReservedRFC4122 { + t.Errorf("Expected RFC4122 variant %x, but got %x", ReservedRFC4122, u.Variant()) + } + if !parseUUIDRegex.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given: %s", u.String()) + } +} + +func TestUUID_NewV4Bulk(t *testing.T) { + for i := 0; i < generate; i++ { + NewV4() + } +} + +// Tests NewV5 +func TestUUID_NewV5(t *testing.T) { + u := NewV5(NamespaceURL, goLang) + if u.Version() != 5 { + t.Errorf("Expected correct version %d, but got %d", 5, u.Version()) + } + if u.Variant() != ReservedRFC4122 { + t.Errorf("Expected RFC4122 variant %x, but got %x", ReservedRFC4122, u.Variant()) + } + if !parseUUIDRegex.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given: %s", u.String()) + } + ur, _ := url.Parse(string(goLang)) + + // Same NS same name MUST be equal + u2 := NewV5(NamespaceURL, ur) + if !Equal(u2, u) { + t.Errorf("Expected UUIDs generated with same namespace and name to equal but got: %s and %s", u2, u) + } + + // Different NS same name MUST NOT be equal + u3 := NewV5(NamespaceDNS, ur) + if Equal(u3, u) { + t.Errorf("Expected UUIDs generated with different namespace and same name to be different but got: %s and %s", u3, u) + } + + // Same NS different name MUST NOT be equal + u4 := NewV5(NamespaceURL, u) + if Equal(u4, u) { + t.Errorf("Expected UUIDs generated with the same namespace and different names to be different but got: %s and %s", u4, u) + } + + ids := []UUID{ + u, u2, u3, u4, + } + for j, id := range ids { + i := NewV5(NamespaceURL, NewName(string(j), id)) + if Equal(id, i) { + t.Errorf("Expected UUIDs generated with the same namespace and different names to be different but got: %s and %s", id, i) + } + } +} + +func TestUUID_NewV5Bulk(t *testing.T) { + for i := 0; i < generate; i++ { + NewV5(NamespaceDNS, goLang) + } +} + +// A small test to test uniqueness across all UUIDs created +func TestUUID_EachIsUnique(t *testing.T) { + s := 1000 + ids := make([]UUID, s) + for i := 0; i < s; i++ { + u := NewV1() + ids[i] = u + for j := 0; j < i; j++ { + if Equal(ids[j], u) { + t.Error("Should not create the same V1 UUID", u, ids[j]) + } + } + } + ids = make([]UUID, s) + for i := 0; i < s; i++ { + u := NewV3(NamespaceDNS, NewName(string(i), Name(goLang))) + ids[i] = u + for j := 0; j < i; j++ { + if Equal(ids[j], u) { + t.Error("Should not create the same V3 UUID", u, ids[j]) + } + } + } + ids = make([]UUID, s) + for i := 0; i < s; i++ { + u := NewV4() + ids[i] = u + for j := 0; j < i; j++ { + if Equal(ids[j], u) { + t.Error("Should not create the same V4 UUID", u, ids[j]) + } + } + } + ids = make([]UUID, s) + for i := 0; i < s; i++ { + u := NewV5(NamespaceDNS, NewName(string(i), Name(goLang))) + ids[i] = u + for j := 0; j < i; j++ { + if Equal(ids[j], u) { + 
t.Error("Should not create the same V5 UUID", u, ids[j]) + } + } + } +} + +// Not really a test but used for visual verification of the defaults +func UUID_NamespaceDefaults() { + fmt.Println(NamespaceDNS) + fmt.Println(NamespaceURL) + fmt.Println(NamespaceOID) + fmt.Println(NamespaceX500) +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/saver.go b/Godeps/_workspace/src/github.com/twinj/uuid/saver.go new file mode 100644 index 000000000..0b776a46a --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/saver.go @@ -0,0 +1,140 @@ +package uuid + +/**************** + * Date: 21/06/15 + * Time: 5:48 PM + ***************/ + +import ( + "encoding/gob" + "log" + "os" + "time" +) + +func init() { + gob.Register(stateEntity{}) +} + +func SetupFileSystemStateSaver(pConfig StateSaverConfig) { + saver := &FileSystemSaver{} + saver.saveReport = pConfig.SaveReport + saver.saveSchedule = int64(pConfig.SaveSchedule) + SetupCustomStateSaver(saver) +} + +// A wrapper for default setup of the FileSystemStateSaver +type StateSaverConfig struct { + + // Print save log + SaveReport bool + + // Save every x nanoseconds + SaveSchedule time.Duration +} + +// *********************************************** StateEntity + +// StateEntity acts as a marshaller struct for the state +type stateEntity struct { + Past Timestamp + Node []byte + Sequence uint16 +} + +// This implements the StateSaver interface for UUIDs +type FileSystemSaver struct { + cache *os.File + saveState uint64 + saveReport bool + saveSchedule int64 +} + +// Saves the current state of the generator +// If the scheduled file save is reached then the file is synced +func (o *FileSystemSaver) Save(pState *State) { + if pState.past >= pState.next { + err := o.open() + defer o.cache.Close() + if err != nil { + log.Println("uuid.State.save:", err) + return + } + // do the save + o.encode(pState) + // a tick is 100 nano seconds + pState.next = pState.past + Timestamp(o.saveSchedule / 100) + if o.saveReport { + log.Printf("UUID STATE: SAVED %d", pState.past) + } + } +} + +func (o *FileSystemSaver) Init(pState *State) { + pState.saver = o + err := o.open() + defer o.cache.Close() + if err != nil { + if os.IsNotExist(err) { + log.Printf("'%s' created\n", "uuid.SaveState") + var err error + o.cache, err = os.Create(os.TempDir() + "/state.unique") + if err != nil { + log.Println("uuid.State.init: SaveState error:", err) + goto pastInit + } + o.encode(pState) + } else { + log.Println("uuid.State.init: SaveState error:", err) + goto pastInit + } + } + err = o.decode(pState) + if err != nil { + goto pastInit + } + pState.randomSequence = false +pastInit: + if timestamp() <= pState.past { + pState.sequence++ + } + pState.next = pState.past +} + +func (o *FileSystemSaver) reset() { + o.cache.Seek(0, 0) +} + +func (o *FileSystemSaver) open() error { + var err error + o.cache, err = os.OpenFile(os.TempDir()+"/state.unique", os.O_RDWR, os.ModeExclusive) + return err +} + +// Encodes State generator data into a saved file +func (o *FileSystemSaver) encode(pState *State) { + // ensure reader state is ready for use + o.reset() + enc := gob.NewEncoder(o.cache) + // Wrap private State data into the StateEntity + err := enc.Encode(&stateEntity{pState.past, pState.node, pState.sequence}) + if err != nil { + log.Panic("UUID.encode error:", err) + } +} + +// Decodes StateEntity data into the main State +func (o *FileSystemSaver) decode(pState *State) error { + o.reset() + dec := gob.NewDecoder(o.cache) + entity := stateEntity{} + err := dec.Decode(&entity) + 
if err != nil { + log.Println("uuid.decode error:", err) + return err + } + pState.past = entity.Past + pState.node = entity.Node + pState.sequence = entity.Sequence + return nil +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/saver_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/saver_test.go new file mode 100644 index 000000000..2797e2b1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/saver_test.go @@ -0,0 +1,100 @@ +package uuid + +/**************** + * Date: 21/06/15 + * Time: 6:46 PM + ***************/ + +import ( + "sync" + "sync/atomic" + "testing" + "time" +) + +const ( + saveDuration time.Duration = 3 +) + +var ( + config = StateSaverConfig{SaveReport: true, SaveSchedule: saveDuration * time.Second} +) + +func init() { + SetupFileSystemStateSaver(config) +} + +// Tests that the schedule is run on the timeDuration +func TestUUID_State_saveSchedule(t *testing.T) { + + if state.saver != nil { + count := 0 + + now := time.Now() + state.next = timestamp() + Timestamp(config.SaveSchedule/100) + + for i := 0; i < 20000; i++ { + if timestamp() >= state.next { + count++ + } + NewV1() + time.Sleep(1 * time.Millisecond) + } + d := time.Since(now) + timesSaved := int(d.Seconds()) / int(saveDuration) + if count != timesSaved { + t.Errorf("Should be as many saves as %d second increments but got: %d instead of %d", saveDuration, count, timesSaved) + } + } +} + +// Tests that the schedule saves properly when uuid are called in go routines +func TestUUID_State_saveScheduleGo(t *testing.T) { + + if state.saver != nil { + + size := 5000 + ids := make([]UUID, size) + + var wg sync.WaitGroup + wg.Add(size) + + var count int32 + mutex := &sync.Mutex{} + + now := time.Now() + state.next = timestamp() + Timestamp(config.SaveSchedule/100) + + for i := 0; i < size; i++ { + go func(index int) { + defer wg.Done() + if timestamp() >= state.next { + atomic.AddInt32(&count, 1) + } + u := NewV1() + mutex.Lock() + ids[index] = u + mutex.Unlock() + time.Sleep(100 * time.Nanosecond) + }(i) + } + wg.Wait() + duration := time.Since(now) + + for j := size - 1; j >= 0; j-- { + for k := 0; k < size; k++ { + if k == j { + continue + } + if Equal(ids[j], ids[k]) { + t.Error("Should not create the same V1 UUID", ids[k], ids[j]) + } + } + } + + timesSaved := int(duration.Seconds()) / int(saveDuration) + if int(count) != timesSaved { + t.Errorf("Should be as many saves as %d second increments but got: %d instead of %d", saveDuration, count, timesSaved) + } + } +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/state.go b/Godeps/_workspace/src/github.com/twinj/uuid/state.go new file mode 100644 index 000000000..f3920776c --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/state.go @@ -0,0 +1,137 @@ +package uuid + +/**************** + * Date: 14/02/14 + * Time: 7:43 PM + ***************/ + +import ( + "bytes" + "log" + seed "math/rand" + "net" + "sync" +) + + +// **************************************************** State + +func SetupCustomStateSaver(pSaver StateSaver) { + state.Lock() + pSaver.Init(&state) + state.init() + state.Unlock() +} + +// Holds package information about the current +// state of the UUID generator +type State struct { + + // A flag which informs whether to + // randomly create a node id + randomNode bool + + // A flag which informs whether to + // randomly create the sequence + randomSequence bool + + // the last time UUID was saved + past Timestamp + + // the next time the state will be saved + next Timestamp + + // the last node which saved a 
UUID + node []byte + + // An iterated value to help ensure different + // values across the same domain + sequence uint16 + + sync.Mutex + + // save state interface + saver StateSaver +} + +// Changes the state with current data +// Compares the current found node to the last node stored, +// If they are the same or randomSequence is already set due +// to an earlier read issue then the sequence is randomly generated +// else if there is an issue with the time the sequence is incremented +func (o *State) read(pNow Timestamp, pNode net.HardwareAddr) { + if bytes.Equal([]byte(pNode), o.node) || o.randomSequence { + o.sequence = uint16(seed.Int()) & 0x3FFF + } else if pNow < o.past { + o.sequence++ + } + o.past = pNow + o.node = pNode +} + +func (o *State) persist() { + if o.saver != nil { + o.saver.Save(o) + } +} + +// Initialises the UUID state when the package is first loaded +// it first attempts to decode the file state into State +// if this file does not exist it will create the file and do a flush +// of the random state which gets loaded at package runtime +// second it will attempt to resolve the current hardware address nodeId +// thirdly it will check the state of the clock +func (o *State) init() { + if o.saver != nil { + intfcs, err := net.Interfaces() + if err != nil { + log.Println("uuid.State.init: address error: will generate random node id instead", err) + return + } + a := getHardwareAddress(intfcs) + if a == nil { + log.Println("uuid.State.init: address error: will generate random node id instead", err) + return + } + // Don't use random as we have a real address + o.randomSequence = false + if bytes.Equal([]byte(a), state.node) { + state.sequence++ + } + state.node = a + state.randomNode = false + } +} + +func getHardwareAddress(pInterfaces []net.Interface) net.HardwareAddr { + for _, inter := range pInterfaces { + // Initially I could multicast out the Flags to get + // whether the interface was up but started failing + if (inter.Flags & (1 << net.FlagUp)) != 0 { + //if inter.Flags.String() != "0" { + if addrs, err := inter.Addrs(); err == nil { + for _, addr := range addrs { + if addr.String() != "0.0.0.0" && !bytes.Equal([]byte(inter.HardwareAddr), make([]byte, len(inter.HardwareAddr))) { + return inter.HardwareAddr + } + } + } + } + } + return nil +} + +// *********************************************** StateSaver interface + +// Use this interface to setup a custom state saver if you wish to have +// v1 UUIDs based on your node id and constant time. 
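+//
+// A minimal in-memory sketch of the contract (memSaver is a hypothetical
+// type, shown for illustration only):
+//
+//	type memSaver struct{ last stateEntity }
+//
+//	func (m *memSaver) Init(s *State) { s.saver = m }
+//	func (m *memSaver) Save(s *State) { m.last = stateEntity{s.past, s.node, s.sequence} }
+//
+// It would then be registered with SetupCustomStateSaver(&memSaver{}).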
+type StateSaver interface { + // Init is run if Setup() is false + // Init should setup the system to save the state + Init(*State) + + // Save saves the state and is called only if const V1Save and + // Setup() is true + Save(*State) +} + diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/state_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/state_test.go new file mode 100644 index 000000000..c6651e7f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/state_test.go @@ -0,0 +1,92 @@ +package uuid + +/**************** + * Date: 14/02/14 + * Time: 9:08 PM + ***************/ + +import ( + "bytes" + "fmt" + "net" + "testing" +) + +var state_bytes = []byte{ + 0xAA, 0xCF, 0xEE, 0x12, + 0xD4, 0x00, + 0x27, 0x23, + 0x00, + 0xD3, + 0x23, 0x12, 0x4A, 0x11, 0x89, 0xFF, +} + + +func TestUUID_getHardwareAddress(t *testing.T) { + intfcs, err := net.Interfaces() + if err != nil { + return + } + addr := getHardwareAddress(intfcs) + if addr == nil { + return + } + fmt.Println(addr) +} + +func TestUUID_StateSeed(t *testing.T) { + if state.past < Timestamp((1391463463*10000000)+(100*10)+gregorianToUNIXOffset) { + t.Errorf("Expected a value greater than 02/03/2014 @ 9:37pm in UTC but got %d", state.past) + } + if state.node == nil { + t.Errorf("Expected a non nil node") + } + if state.sequence <= 0 { + t.Errorf("Expected a value greater than but got %d", state.sequence) + } +} + +func TestUUID_State_read(t *testing.T) { + s := new(State) + s.past = Timestamp((1391463463 * 10000000) + (100 * 10) + gregorianToUNIXOffset) + s.node = state_bytes + + now := Timestamp((1391463463 * 10000000) + (100 * 10)) + s.read(now+(100*10), net.HardwareAddr(make([]byte, length))) + if s.sequence != 1 { + t.Error("The sequence should increment when the time is"+ + "older than the state past time and the node"+ + "id are not the same.", s.sequence) + } + s.read(now, net.HardwareAddr(state_bytes)) + + if s.sequence == 1 { + t.Error("The sequence should be randomly generated when"+ + " the nodes are equal.", s.sequence) + } + + s = new(State) + s.past = Timestamp((1391463463 * 10000000) + (100 * 10) + gregorianToUNIXOffset) + s.node = state_bytes + s.randomSequence = true + s.read(now, net.HardwareAddr(make([]byte, length))) + + if s.sequence == 0 { + t.Error("The sequence should be randomly generated when"+ + " the randomSequence flag is set.", s.sequence) + } + + if s.past != now { + t.Error("The past time should equal the time passed in" + + " the method.") + } + + if !bytes.Equal(s.node, make([]byte, length)) { + t.Error("The node id should equal the node passed in" + + " the method.") + } +} + +func TestUUID_State_init(t *testing.T) { + +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/struct.go b/Godeps/_workspace/src/github.com/twinj/uuid/struct.go new file mode 100644 index 000000000..4f2fba8bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/struct.go @@ -0,0 +1,103 @@ +package uuid + +/**************** + * Date: 31/01/14 + * Time: 3:34 PM + ***************/ + +import "net" + +// Struct is used for RFC4122 Version 1 UUIDs +type Struct struct { + timeLow uint32 + timeMid uint16 + timeHiAndVersion uint16 + sequenceHiAndVariant byte + sequenceLow byte + node []byte + size int +} + +func (o Struct) Size() int { + return o.size +} + +func (o Struct) Version() int { + return int(o.timeHiAndVersion >> 12) +} + +func (o Struct) Variant() byte { + return variant(o.sequenceHiAndVariant) +} + +// Sets the four most significant bits (bits 12 through 15) of the +// timeHiAndVersion 
field to the 4-bit version number. +func (o *Struct) setVersion(pVersion int) { + o.timeHiAndVersion &= 0x0FFF + o.timeHiAndVersion |= (uint16(pVersion) << 12) +} + +func (o *Struct) setVariant(pVariant byte) { + setVariant(&o.sequenceHiAndVariant, pVariant) +} + +func (o *Struct) Unmarshal(pData []byte) { + o.timeLow = uint32(pData[3]) | uint32(pData[2])<<8 | uint32(pData[1])<<16 | uint32(pData[0])<<24 + o.timeMid = uint16(pData[5]) | uint16(pData[4])<<8 + o.timeHiAndVersion = uint16(pData[7]) | uint16(pData[6])<<8 + o.sequenceHiAndVariant = pData[8] + o.sequenceLow = pData[9] + o.node = pData[10:o.Size()] +} + +func (o *Struct) Bytes() (data []byte) { + data = []byte{ + byte(o.timeLow >> 24), byte(o.timeLow >> 16), byte(o.timeLow >> 8), byte(o.timeLow), + byte(o.timeMid >> 8), byte(o.timeMid), + byte(o.timeHiAndVersion >> 8), byte(o.timeHiAndVersion), + } + data = append(data, o.sequenceHiAndVariant) + data = append(data, o.sequenceLow) + data = append(data, o.node...) + return +} + +// Marshals the UUID bytes into a slice +func (o *Struct) MarshalBinary() ([]byte, error) { + return o.Bytes(), nil +} + +// Un-marshals the data bytes into the UUID struct. +// Implements the BinaryUn-marshaller interface +func (o *Struct) UnmarshalBinary(pData []byte) error { + return UnmarshalBinary(o, pData) +} + +func (o Struct) String() string { + return formatter(&o, format) +} + +func (o Struct) Format(pFormat string) string { + return formatter(&o, pFormat) +} + +// Set the three most significant bits (bits 0, 1 and 2) of the +// sequenceHiAndVariant to variant mask 0x80. +func (o *Struct) setRFC4122Variant() { + o.sequenceHiAndVariant &= variantSet + o.sequenceHiAndVariant |= ReservedRFC4122 +} + +// Unmarshals data into struct for V1 UUIDs +func newV1(pNow Timestamp, pVersion uint16, pVariant byte, pNode net.HardwareAddr) UUID { + o := new(Struct) + o.timeLow = uint32(pNow & 0xFFFFFFFF) + o.timeMid = uint16((pNow >> 32) & 0xFFFF) + o.timeHiAndVersion = uint16((pNow >> 48) & 0x0FFF) + o.timeHiAndVersion |= uint16(pVersion << 12) + o.sequenceLow = byte(state.sequence & 0xFF) + o.sequenceHiAndVariant = byte((state.sequence & 0x3F00) >> 8) + o.sequenceHiAndVariant |= pVariant + o.node = pNode + return o +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/struct_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/struct_test.go new file mode 100644 index 000000000..6ee33e46e --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/struct_test.go @@ -0,0 +1,32 @@ +package uuid + +/**************** + * Date: 15/02/14 + * Time: 12:26 PM + ***************/ + +import ( + "testing" +) + +var struct_bytes = []byte{ + 0xAA, 0xCF, 0xEE, 0x12, + 0xD4, 0x00, + 0x27, 0x23, + 0x00, + 0xD3, + 0x23, 0x12, 0x4A, 0x11, 0x89, 0xFF, +} + +func TestUUID_Struct_UnmarshalBinary(t *testing.T) { + u := new(Struct) + u.size = length + err := u.UnmarshalBinary([]byte{1, 2, 3, 4, 5}) + if err == nil { + t.Errorf("Expected error due to invalid byte length") + } + err = u.UnmarshalBinary(struct_bytes) + if err != nil { + t.Errorf("Expected bytes") + } +} diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/timestamp.go b/Godeps/_workspace/src/github.com/twinj/uuid/timestamp.go new file mode 100644 index 000000000..616428745 --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/timestamp.go @@ -0,0 +1,81 @@ +package uuid + +/**************** + * Date: 14/02/14 + * Time: 7:46 PM + ***************/ + +import ( + "time" +) + +const ( + // A tick is 100 ns + ticksPerSecond = 10000000 + + // Difference 
between the UUID epoch (15 Oct 1582) and the Unix epoch (1 Jan 1970), in 100 ns ticks
+	gregorianToUNIXOffset uint64 = 0x01B21DD213814000
+
+	// set the following to the number of 100ns ticks of the actual
+	// resolution of your system's clock
+	idsPerTimestamp = 1024
+)
+
+var (
+	lastTimestamp    Timestamp
+	idsThisTimestamp = idsPerTimestamp
+)
+
+// ********************************************** Timestamp
+
+type Timestamp uint64
+
+// TODO Create c version same as package runtime and time
+func Now() (sec int64, nsec int32) {
+	t := time.Now()
+	sec = t.Unix()
+	nsec = int32(t.Nanosecond())
+	return
+}
+
+// Converts Unix formatted time to RFC4122 UUID formatted times.
+// UUID UTC base time is October 15, 1582.
+// Unix base time is January 1, 1970.
+// Converts time to 100 nanosecond ticks since epoch.
+// There are 1000000000 nanoseconds in a second,
+// 1000000000 / 100 = 10000000 ticks per second
+func timestamp() Timestamp {
+	sec, nsec := Now()
+	return Timestamp(uint64(sec)*ticksPerSecond +
+		uint64(nsec)/100 + gregorianToUNIXOffset)
+}
+
+func (o Timestamp) Unix() time.Time {
+	t := uint64(o) - gregorianToUNIXOffset
+	return time.Unix(0, int64(t*100))
+}
+
+// Get time as 60-bit 100ns ticks since UUID epoch.
+// Compensate for the fact that real clock resolution is
+// less than 100ns.
+func currentUUIDTimestamp() Timestamp {
+	var timeNow Timestamp
+	for {
+		timeNow = timestamp()
+
+		// if clock reading changed since last UUID generated
+		if lastTimestamp != timeNow {
+			// reset count of UUIDs with this timestamp
+			idsThisTimestamp = 0
+			lastTimestamp = timeNow
+			break
+		}
+		if idsThisTimestamp < idsPerTimestamp {
+			idsThisTimestamp++
+			break
+		}
+		// going too fast for the clock; spin
+	}
+	// add the count of UUIDs to low order bits of the clock reading
+	return timeNow + Timestamp(idsThisTimestamp)
+}
diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/timestamp_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/timestamp_test.go
new file mode 100644
index 000000000..b8cb87355
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/twinj/uuid/timestamp_test.go
@@ -0,0 +1,20 @@
+package uuid
+
+/****************
+ * Date: 15/02/14
+ * Time: 12:19 PM
+ ***************/
+
+import (
+	"testing"
+)
+
+func TestUUID_Timestamp_now(t *testing.T) {
+	sec, nsec := Now()
+	if sec <= 1391463463 {
+		t.Errorf("Expected a value greater than 02/03/2014 @ 9:37pm in UTC but got %d", sec)
+	}
+	if nsec == 0 {
+		t.Error("Expected a value")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/uuid_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/uuid_test.go
new file mode 100644
index 000000000..7236be989
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/twinj/uuid/uuid_test.go
@@ -0,0 +1,119 @@
+package uuid_test
+
+import (
+	"fmt"
+	"github.com/twinj/uuid"
+	"testing"
+	"time"
+)
+
+const (
+	print = "version %d variant %x: %s\n"
+)
+
+func Test_AllVersions(t *testing.T) {
+	Test_NewV1(nil)
+	Test_NewV3(nil)
+	Test_NewV4(nil)
+	Test_NewV5(nil)
+}
+
+func Test_NewV1(t *testing.T) {
+	u1 := uuid.NewV1()
+	fmt.Printf(print, u1.Version(), u1.Variant(), u1)
+}
+
+func Test_NewV3(t *testing.T) {
+	u, _ := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	u3 := uuid.NewV3(u, uuid.Name("test"))
+	fmt.Printf(print, u3.Version(), u3.Variant(), u3)
+}
+
+func Test_NewV4(t *testing.T) {
+	u4 := uuid.NewV4()
+	fmt.Printf(print, u4.Version(), u4.Variant(), u4)
+}
+
+func Test_NewV5(t *testing.T) {
+	u5 := uuid.NewV5(uuid.NamespaceURL, uuid.Name("test"))
+	fmt.Printf(print, u5.Version(), u5.Variant(), u5)
+}
+
+func Test_Parse(t *testing.T) {
+	u, err :=
uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	if err != nil {
+		fmt.Println("error:", err)
+		return
+	}
+	fmt.Println(u)
+}
+
+func Example() {
+	var config = uuid.StateSaverConfig{SaveReport: true, SaveSchedule: 30 * time.Minute}
+	uuid.SetupFileSystemStateSaver(config)
+	u1 := uuid.NewV1()
+	fmt.Printf("version %d variant %x: %s\n", u1.Version(), u1.Variant(), u1)
+
+	uP, _ := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	u3 := uuid.NewV3(uP, uuid.Name("test"))
+
+	u4 := uuid.NewV4()
+	fmt.Printf("version %d variant %x: %s\n", u4.Version(), u4.Variant(), u4)
+
+	u5 := uuid.NewV5(uuid.NamespaceURL, uuid.Name("test"))
+
+	if uuid.Equal(u1, u3) {
+		fmt.Printf("Will never happen")
+	}
+
+	fmt.Printf(uuid.Formatter(u5, uuid.CurlyHyphen))
+
+	uuid.SwitchFormat(uuid.BracketHyphen)
+}
+
+func ExampleNewV1() {
+	u1 := uuid.NewV1()
+	fmt.Printf(print, u1.Version(), u1.Variant(), u1)
+}
+
+func ExampleNewV3() {
+	u, _ := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	u3 := uuid.NewV3(u, uuid.Name("test"))
+	fmt.Printf("version %d variant %x: %s\n", u3.Version(), u3.Variant(), u3)
+}
+
+func ExampleNewV4() {
+	u4 := uuid.NewV4()
+	fmt.Printf("version %d variant %x: %s\n", u4.Version(), u4.Variant(), u4)
+}
+
+func ExampleNewV5() {
+	u5 := uuid.NewV5(uuid.NamespaceURL, uuid.Name("test"))
+	fmt.Printf("version %d variant %x: %s\n", u5.Version(), u5.Variant(), u5)
+}
+
+func ExampleParse() {
+	u, err := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	if err != nil {
+		fmt.Println("error:", err)
+	}
+	fmt.Println(u)
+}
+
+func ExampleSetupFileSystemStateSaver() {
+	var config = uuid.StateSaverConfig{SaveReport: true, SaveSchedule: 30 * time.Minute}
+	uuid.SetupFileSystemStateSaver(config)
+	u1 := uuid.NewV1()
+	fmt.Printf("version %d variant %x: %s\n", u1.Version(), u1.Variant(), u1)
+}
+
+func ExampleFormatter() {
+	u4 := uuid.NewV4()
+	fmt.Printf(uuid.Formatter(u4, uuid.CurlyHyphen))
+}
+
+func ExampleSwitchFormat() {
+	uuid.SwitchFormat(uuid.BracketHyphen)
+	u4 := uuid.NewV4()
+	fmt.Printf("version %d variant %x: %s\n", u4.Version(), u4.Variant(), u4)
+}
diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/uuids.go b/Godeps/_workspace/src/github.com/twinj/uuid/uuids.go
new file mode 100644
index 000000000..846ff05e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/twinj/uuid/uuids.go
@@ -0,0 +1,296 @@
+// This package provides RFC4122 UUIDs.
+//
+// NewV1, NewV3, NewV4 and NewV5 generate version 1, 3, 4
+// and 5 UUIDs as specified in RFC 4122.
+//
+// New([]byte), unsafe; NewHex(string); and Parse(string) for
+// creating UUIDs from existing data.
+//
+// The original version was from Krzysztof Kowalik.
+// Unfortunately, that version was non-compliant with RFC 4122.
+// I forked it but have since heavily redesigned it.
+//
+// The example code in the specification was also used as reference
+// for design.
+//
+// Copyright (C) 2014 twinj@github.com. MIT-style licence.
+package uuid
+
+/****************
+ * Date: 31/01/14
+ * Time: 3:35 PM
+ ***************/
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"regexp"
+	"strings"
+)
+
+const (
+	ReservedNCS       byte = 0x00
+	ReservedRFC4122   byte = 0x80 // or A0 if masked with 1F
+	ReservedMicrosoft byte = 0xC0
+	ReservedFuture    byte = 0xE0
+	TakeBack          byte = 0xF0
+)
+
+const (
+
+	// Pattern used to parse a string representation of a UUID.
+	// The current pattern accepts strings in which the opening or closing
+	// bracket, or any of the hyphens, may be omitted.
+	// It is only used to extract the main bytes to create a UUID,
+	// so these imperfections are of no consequence.
+	hexPattern = `^(urn\:uuid\:)?[\{(\[]?([A-Fa-f0-9]{8})-?([A-Fa-f0-9]{4})-?([1-5][A-Fa-f0-9]{3})-?([A-Fa-f0-9]{4})-?([A-Fa-f0-9]{12})[\]\})]?$`
+)
+
+var (
+	parseUUIDRegex = regexp.MustCompile(hexPattern)
+	format         string
+)
+
+func init() {
+	SwitchFormat(CleanHyphen)
+}
+
+// ****************************************************** UUID
+
+// The main interface for UUIDs.
+// Each implementation must also implement the UniqueName interface.
+type UUID interface {
+	encoding.BinaryMarshaler
+	encoding.BinaryUnmarshaler
+
+	// Marshals the UUID bytes or data
+	Bytes() (data []byte)
+
+	// Organises data into a new UUID
+	Unmarshal(pData []byte)
+
+	// Size is used where different implementations require
+	// different sizes. Should return the number of bytes in
+	// the implementation.
+	// Enables Unmarshal and Bytes to screen for size
+	Size() int
+
+	// Version returns a version number of the algorithm used
+	// to generate the UUID.
+	// This may behave independently across non-RFC4122 UUIDs
+	Version() int
+
+	// Variant returns the UUID Variant.
+	// This will be one of the constants:
+	// ReservedRFC4122,
+	// ReservedMicrosoft,
+	// ReservedFuture,
+	// ReservedNCS.
+	// This may behave differently across non-RFC4122 UUIDs
+	Variant() byte
+
+	// UUID can be used as a Name within a namespace; this is
+	// simply a String() string method which returns a formatted
+	// version of the UUID.
+	String() string
+}
+
+// New creates a UUID from a slice of bytes.
+// Truncates any bytes past the default length of 16.
+// Will panic if the data slice is too small.
+func New(pData []byte) UUID {
+	o := new(Array)
+	o.Unmarshal(pData[:length])
+	return o
+}
+
+// Creates a UUID from a hex string.
+// Will panic if the hex string is invalid - it will panic even with
+// hyphens and brackets.
+// Expects a clean string; use Parse otherwise.
+func NewHex(pUuid string) UUID {
+	bytes, err := hex.DecodeString(pUuid)
+	if err != nil {
+		panic(err)
+	}
+	return New(bytes)
+}
+
+// Parse creates a UUID from a valid string representation.
+// Accepts UUID strings in the following formats:
+//		6ba7b8149dad11d180b400c04fd430c8
+//		6ba7b814-9dad-11d1-80b4-00c04fd430c8
+//		{6ba7b814-9dad-11d1-80b4-00c04fd430c8}
+//		urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8
+//		[6ba7b814-9dad-11d1-80b4-00c04fd430c8]
+//
+func Parse(pUUID string) (UUID, error) {
+	md := parseUUIDRegex.FindStringSubmatch(pUUID)
+	if md == nil {
+		return nil, errors.New("uuid.Parse: invalid string")
+	}
+	return NewHex(md[2] + md[3] + md[4] + md[5] + md[6]), nil
+}
+
+// Digest hashes a namespace UUID and a UniqueName, then marshals the
+// sum into a new UUID
+func Digest(o, pNs UUID, pName UniqueName, pHash hash.Hash) {
+	// Hash writer never returns an error
+	pHash.Write(pNs.Bytes())
+	pHash.Write([]byte(pName.String()))
+	o.Unmarshal(pHash.Sum(nil)[:o.Size()])
+}
+
+// Function provides a safe way to unmarshal bytes into an
+// existing UUID.
+// Checks for length.
+func UnmarshalBinary(o UUID, pData []byte) error {
+	if len(pData) != o.Size() {
+		return errors.New("uuid.UnmarshalBinary: invalid length")
+	}
+	o.Unmarshal(pData)
+	return nil
+}
+
+// ********************************************** UUID Names
+
+// A UUID Name is a simple string that implements UniqueName
+// and satisfies the Stringer interface.
+type Name string
+
+// Returns the name as a string. Satisfies the Stringer interface.
+func (o Name) String() string {
+	return string(o)
+}
+
+// NewName will create a unique name from several sources
+func NewName(salt string, pNames ...UniqueName) UniqueName {
+	var s string
+	for _, s2 := range pNames {
+		s += s2.String()
+	}
+	return Name(s + salt)
+}
+
+// UniqueName is a Stringer interface
+// Made for easy passing of IPs, URLs, the several Address types,
+// Buffers and any other type which implements Stringer.
+// string, []byte types and Hash sums will need to be cast to
+// the Name type or some other type which implements
+// Stringer or UniqueName
+type UniqueName interface {
+
+	// Many go types implement this method for use with printing
+	// Will convert the current type to its native string format
+	String() string
+}
+
+// ********************************************** UUID Printing
+
+// A Format is a pattern used by the stringer interface with which to print
+// the UUID.
+type Format string
+
+const (
+	Clean   Format = "%x%x%x%x%x%x"
+	Curly   Format = "{%x%x%x%x%x%x}"
+	Bracket Format = "(%x%x%x%x%x%x)"
+
+	// This is the default format.
+	CleanHyphen Format = "%x-%x-%x-%x%x-%x"
+
+	CurlyHyphen   Format = "{%x-%x-%x-%x%x-%x}"
+	BracketHyphen Format = "(%x-%x-%x-%x%x-%x)"
+	GoIdFormat    Format = "[%X-%X-%x-%X%X-%x]"
+)
+
+// Gets the current default format pattern
+func GetFormat() string {
+	return format
+}
+
+// SwitchFormat switches the default printing format for ALL UUID strings.
+// A valid format must contain 6 verb groups; if the supplied Format does
+// not, SwitchFormat panics.
+func SwitchFormat(pFormat Format) {
+	form := string(pFormat)
+	if strings.Count(form, "%") != 6 {
+		panic(errors.New("uuid.switchFormat: invalid formatting"))
+	}
+	format = form
+}
+
+// Same as SwitchFormat but will make it uppercase
+func SwitchFormatUpperCase(pFormat Format) {
+	form := strings.ToUpper(string(pFormat))
+	SwitchFormat(Format(form))
+}
+
+// Equal compares whether each UUID holds the same bytes
+func Equal(p1 UUID, p2 UUID) bool {
+	return bytes.Equal(p1.Bytes(), p2.Bytes())
+}
+
+// Formatter formats a UUID into a human-readable string which matches the
+// given Format.
+// Use it for one-time formatting, when switching the default via
+// SwitchFormat would be overkill.
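+//
+// Example (sketch, using the canonical namespace string seen elsewhere in
+// this package):
+//	u, _ := Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+//	s := Formatter(u, CurlyHyphen) // "{6ba7b814-9dad-11d1-80b4-00c04fd430c8}"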
+func Formatter(pUUID UUID, pFormat Format) string { + form := string(pFormat) + if strings.Count(form, "%") != 6 { + panic(errors.New("uuid.Formatter: invalid formatting")) + } + return formatter(pUUID, form) +} + +// ********************************************** UUID Versions + +type UUIDVersion int + +const ( + NONE UUIDVersion = iota + RFC4122v1 + DunnoYetv2 + RFC4122v3 + RFC4122v4 + RFC4122v5 +) + +// *************************************************** Helpers + +// Retrieves the variant from the given byte +func variant(pVariant byte) byte { + switch pVariant & variantGet { + case ReservedRFC4122, 0xA0: + return ReservedRFC4122 + case ReservedMicrosoft: + return ReservedMicrosoft + case ReservedFuture: + return ReservedFuture + } + return ReservedNCS +} + +// not strictly required +func setVariant(pByte *byte, pVariant byte) { + switch pVariant { + case ReservedRFC4122: + *pByte &= variantSet + case ReservedFuture, ReservedMicrosoft: + *pByte &= 0x1F + case ReservedNCS: + *pByte &= 0x7F + default: + panic(errors.New("uuid.setVariant: invalid variant mask")) + } + *pByte |= pVariant +} + +// format a UUID into a human readable string +func formatter(pUUID UUID, pFormat string) string { + b := pUUID.Bytes() + return fmt.Sprintf(pFormat, b[0:4], b[4:6], b[6:8], b[8:9], b[9:10], b[10:pUUID.Size()]) +} + diff --git a/Godeps/_workspace/src/github.com/twinj/uuid/uuids_test.go b/Godeps/_workspace/src/github.com/twinj/uuid/uuids_test.go new file mode 100644 index 000000000..ca212175e --- /dev/null +++ b/Godeps/_workspace/src/github.com/twinj/uuid/uuids_test.go @@ -0,0 +1,449 @@ +package uuid + +/**************** + * Date: 3/02/14 + * Time: 10:59 PM + ***************/ + +import ( + "crypto/md5" + "crypto/sha1" + "fmt" + "regexp" + "strings" + "testing" +) + +const ( + secondsPerHour = 60 * 60 + secondsPerDay = 24 * secondsPerHour + unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * secondsPerDay + internalToUnix int64 = -unixToInternal +) + +var ( + uuid_goLang Name = "https://google.com/golang.org?q=golang" + printer bool = false + uuid_bytes = []byte{ + 0xAA, 0xCF, 0xEE, 0x12, + 0xD4, 0x00, + 0x27, 0x23, + 0x00, + 0xD3, + 0x23, 0x12, 0x4A, 0x11, 0x89, 0xFF, + } + uuid_variants = []byte{ + ReservedNCS, ReservedRFC4122, ReservedMicrosoft, ReservedFuture, + } + namespaceUuids = []UUID{ + NamespaceDNS, NamespaceURL, NamespaceOID, NamespaceX500, + } + invalidHexStrings = [...]string{ + "foo", + "6ba7b814-9dad-11d1-80b4-", + "6ba7b814--9dad-11d1-80b4--00c04fd430c8", + "6ba7b814-9dad7-11d1-80b4-00c04fd430c8999", + "{6ba7b814-9dad-1180b4-00c04fd430c8", + "{6ba7b814--11d1-80b4-00c04fd430c8}", + "urn:uuid:6ba7b814-9dad-1666666680b4-00c04fd430c8", + } + validHexStrings = [...]string{ + "6ba7b8149dad-11d1-80b4-00c04fd430c8}", + "{6ba7b8149dad-11d1-80b400c04fd430c8}", + "{6ba7b814-9dad11d180b400c04fd430c8}", + "6ba7b8149dad-11d1-80b4-00c04fd430c8", + "6ba7b814-9dad11d1-80b4-00c04fd430c8", + "6ba7b814-9dad-11d180b4-00c04fd430c8", + "6ba7b814-9dad-11d1-80b400c04fd430c8", + "6ba7b8149dad11d180b400c04fd430c8", + "6ba7b814-9dad-11d1-80b4-00c04fd430c8", + "{6ba7b814-9dad-11d1-80b4-00c04fd430c8}", + "{6ba7b814-9dad-11d1-80b4-00c04fd430c8", + "6ba7b814-9dad-11d1-80b4-00c04fd430c8}", + "(6ba7b814-9dad-11d1-80b4-00c04fd430c8)", + "urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8", + } +) + +func TestUUID_Simple(t *testing.T) { + u := NewV1() + outputLn(u) + + u, _ = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + u = NewV3(u, Name("test")) + outputLn(u) + if 
!strings.EqualFold("45a113ac-c7f2-30b0-90a5-a399ab912716", u.String()) { + t.Errorf("Expected string representation to be %s, but got: %s", "45a113ac-c7f2-30b0-90a5-a399ab912716", u.String()) + } + + u = NewV4() + outputLn(u) + + u = NewV5(u, Name("test")) + outputLn(u) +} + +func TestUUID_New(t *testing.T) { + u := New(uuid_bytes) + if u == nil { + t.Error("Expected a valid UUID") + } + if u.Version() != 2 { + t.Errorf("Expected correct version %d, but got %d", 2, u.Version()) + } + if u.Variant() != ReservedNCS { + t.Errorf("Expected ReservedNCS variant %x, but got %x", ReservedNCS, u.Variant()) + } + if !parseUUIDRegex.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given: %s", u.String()) + } +} + +func TestUUID_NewBulk(t *testing.T) { + for i := 0; i < 1000000; i++ { + New(uuid_bytes) + } +} + + +const ( + clean = `[A-Fa-f0-9]{8}[A-Fa-f0-9]{4}[1-5fF][A-Fa-f0-9]{3}[A-Fa-f0-9]{4}[A-Fa-f0-9]{12}` + cleanHexPattern = `^` + clean + `$` + curlyHexPattern = `^\{` + clean + `\}$` + bracketHexPattern = `^\(` + clean + `\)$` + hyphen = `[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[1-5fF][A-Fa-f0-9]{3}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}` + cleanHyphenHexPattern = `^` + hyphen + `$` + curlyHyphenHexPattern = `^\{` + hyphen + `\}$` + bracketHyphenHexPattern = `^\(` + hyphen + `\)$` + goIdHexPattern = `^\[[A-F0-9]{8}-[A-F0-9]{4}-[1-5fF][a-f0-9]{3}-[A-F0-9]{4}-[a-f0-9]{12}\]$` +) + +func TestUUID_Formats_String(t *testing.T) { + ids := []UUID{NewV4(), NewV1()} + + // Reset default + SwitchFormat(CleanHyphen) + + for _, u := range ids { + + SwitchFormatUpperCase(CurlyHyphen) + if !regexp.MustCompile(curlyHyphenHexPattern).MatchString(u.String()) { + t.Error("Curly hyphen UUID string output got", u, u.Version()) + } + outputLn(u) + + // Clean + SwitchFormat(Clean) + if !regexp.MustCompile(cleanHexPattern).MatchString(u.String()) { + t.Error("Clean UUID string output got", u, u.Version()) + } + outputLn(u) + + // Curly + SwitchFormat(Curly) + if !regexp.MustCompile(curlyHexPattern).MatchString(u.String()) { + t.Error("Curly clean UUID string output got", u, u.Version()) + } + outputLn(u) + + // Bracket + SwitchFormat(Bracket) + if !regexp.MustCompile(bracketHexPattern).MatchString(u.String()) { + t.Error("Bracket clean UUID string output got", u, u.Version()) + } + outputLn(u) + + // Clean Hyphen + SwitchFormat(CleanHyphen) + if !regexp.MustCompile(cleanHyphenHexPattern).MatchString(u.String()) { + t.Error("Clean hyphen UUID string output got", u, u.Version()) + } + outputLn(u) + + // Bracket Hyphen + SwitchFormat(BracketHyphen) + if !regexp.MustCompile(bracketHyphenHexPattern).MatchString(u.String()) { + t.Error("Bracket hyphen UUID string output got", u, u.Version()) + } + outputLn(u) + + // Bracket Hyphen + SwitchFormat(GoIdFormat) + if !regexp.MustCompile(goIdHexPattern).MatchString(u.String()) { + t.Error("GoId UUID string output expected", u, u.Version()) + } + outputLn(u) + + // Reset default + SwitchFormat(CleanHyphen) + } +} + +func TestUUID_Formatter(t *testing.T) { + ids := []UUID{NewV4(), NewV1()} + + for _, u := range ids { + // CurlyHyphen - default + if !regexp.MustCompile(curlyHyphenHexPattern).MatchString(Formatter(u, CurlyHyphen)) { + t.Error("Curly hyphen UUID string output got", Formatter(u, CurlyHyphen)) + } + outputLn(Formatter(u, CurlyHyphen)) + + // Clean + if !regexp.MustCompile(cleanHexPattern).MatchString(Formatter(u, Clean)) { + t.Error("Clean UUID string output got", Formatter(u, Clean)) + } + outputLn(Formatter(u, Clean)) + + // Curly + if 
!regexp.MustCompile(curlyHexPattern).MatchString(Formatter(u, Curly)) { + t.Error("Curly clean UUID string output", Formatter(u, Curly)) + } + outputLn(Formatter(u, Curly)) + + // Bracket + if !regexp.MustCompile(bracketHexPattern).MatchString(Formatter(u, Bracket)) { + t.Error("Bracket clean UUID string output", Formatter(u, Bracket)) + } + outputLn(Formatter(u, Bracket)) + + // Clean Hyphen + if !regexp.MustCompile(cleanHyphenHexPattern).MatchString(Formatter(u, CleanHyphen)) { + t.Error("Clean hyphen UUID string output", Formatter(u, CleanHyphen)) + } + outputLn(Formatter(u, CleanHyphen)) + + // Bracket Hyphen + if !regexp.MustCompile(bracketHyphenHexPattern).MatchString(Formatter(u, BracketHyphen)) { + t.Error("Bracket hyphen UUID string output", Formatter(u, BracketHyphen)) + } + outputLn(Formatter(u, BracketHyphen)) + + // GoId Format + if !regexp.MustCompile(goIdHexPattern).MatchString(Formatter(u, GoIdFormat)) { + t.Error("GoId UUID string output expected", Formatter(u, GoIdFormat)) + } + outputLn(Formatter(u, GoIdFormat)) + } +} + +func TestUUID_NewHex(t *testing.T) { + s := "f3593cffee9240df408687825b523f13" + u := NewHex(s) + if u == nil { + t.Error("Expected a valid UUID") + } + if u.Version() != 4 { + t.Errorf("Expected correct version %d, but got %d", 4, u.Version()) + } + if u.Variant() != ReservedNCS { + t.Errorf("Expected ReservedNCS variant %x, but got %x", ReservedNCS, u.Variant()) + } + if !parseUUIDRegex.MatchString(u.String()) { + t.Errorf("Expected string representation to be valid, given: %s", u.String()) + } +} + +func TestUUID_NewHexBulk(t *testing.T) { + for i := 0; i < 1000000; i++ { + s := "f3593cffee9240df408687825b523f13" + NewHex(s) + } +} + +func TestUUID_Parse(t *testing.T) { + for _, v := range invalidHexStrings { + _, err := Parse(v) + if err == nil { + t.Error("Expected error due to invalid UUID string:", v) + } + } + for _, v := range validHexStrings { + _, err := Parse(v) + if err != nil { + t.Error("Expected valid UUID string but got error:", v) + } + } + for _, id := range namespaceUuids { + _, err := Parse(id.String()) + if err != nil { + t.Error("Expected valid UUID string but got error:", err) + } + } +} + +func TestUUID_Sum(t *testing.T) { + u := new(Array) + Digest(u, NamespaceDNS, uuid_goLang, md5.New()) + if u.Bytes() == nil { + t.Error("Expected new data in bytes") + } + output(u.Bytes()) + u = new(Array) + Digest(u, NamespaceDNS, uuid_goLang, sha1.New()) + if u.Bytes() == nil { + t.Error("Expected new data in bytes") + } + output(u.Bytes()) +} + +// Tests all possible version numbers and that +// each number returned is the same +func TestUUID_Struct_VersionBits(t *testing.T) { + uStruct := new(Struct) + uStruct.size = length + for v := 0; v < 16; v++ { + for i := 0; i <= 255; i++ { + uuid_bytes[versionIndex] = byte(i) + uStruct.Unmarshal(uuid_bytes) + uStruct.setVersion(v) + output(uStruct) + if uStruct.Version() != v { + t.Errorf("%x does not resolve to %x", byte(uStruct.Version()), v) + } + output("\n") + } + } +} + +// Tests all possible variants with their respective bits set +// Tests whether the expected output comes out on each byte case +func TestUUID_Struct_VariantBits(t *testing.T) { + for _, v := range uuid_variants { + for i := 0; i <= 255; i++ { + uuid_bytes[variantIndex] = byte(i) + + uStruct := createStruct(uuid_bytes, 4, v) + b := uStruct.sequenceHiAndVariant >> 4 + tVariantConstraint(v, b, uStruct, t) + + if uStruct.Variant() != v { + t.Errorf("%d does not resolve to %x: get %x", i, v, uStruct.Variant()) + } + } + } +} + 
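+// Editorial sketch, not from upstream: Parse and Formatter should be
+// inverses for the canonical lower-case hyphenated form, so a round trip
+// through both must preserve the input. Uses only identifiers defined above.
+func TestUUID_ParseFormatterRoundTrip(t *testing.T) {
+	const s = "6ba7b814-9dad-11d1-80b4-00c04fd430c8"
+	u, err := Parse(s)
+	if err != nil {
+		t.Fatal("Expected valid UUID string but got error:", err)
+	}
+	if got := Formatter(u, CleanHyphen); !strings.EqualFold(got, s) {
+		t.Errorf("Expected round trip to yield %s, but got: %s", s, got)
+	}
+}
+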
+// Tests all possible variants with their respective bits set
+// Tests whether the expected output comes out on each byte case
+func TestUUID_Array_VariantBits(t *testing.T) {
+	for _, v := range uuid_variants {
+		for i := 0; i <= 255; i++ {
+			uuid_bytes[variantIndex] = byte(i)
+
+			uArray := createArray(uuid_bytes, 4, v)
+			b := uArray[variantIndex] >> 4
+			tVariantConstraint(v, b, uArray, t)
+
+			if uArray.Variant() != v {
+				t.Errorf("%d does not resolve to %x", i, v)
+			}
+		}
+	}
+}
+
+// Tests all possible version numbers and that
+// each number returned is the same
+func TestUUID_Array_VersionBits(t *testing.T) {
+	uArray := new(Array)
+	for v := 0; v < 16; v++ {
+		for i := 0; i <= 255; i++ {
+			uuid_bytes[versionIndex] = byte(i)
+			uArray.Unmarshal(uuid_bytes)
+			uArray.setVersion(v)
+			output(uArray)
+			if uArray.Version() != v {
+				t.Errorf("%x does not resolve to %x", byte(uArray.Version()), v)
+			}
+			output("\n")
+		}
+	}
+}
+
+func BenchmarkUUID_Parse(b *testing.B) {
+	s := "f3593cff-ee92-40df-4086-87825b523f13"
+	for i := 0; i < b.N; i++ {
+		_, err := Parse(s)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+	b.StopTimer()
+	b.ReportAllocs()
+}
+
+// *******************************************************
+
+func createStruct(pData []byte, pVersion int, pVariant byte) *Struct {
+	o := new(Struct)
+	o.size = length
+	o.Unmarshal(pData)
+	o.setVersion(pVersion)
+	o.setVariant(pVariant)
+	return o
+}
+
+func createArray(pData []byte, pVersion int, pVariant byte) *Array {
+	o := new(Array)
+	o.Unmarshal(pData)
+	o.setVersion(pVersion)
+	o.setVariant(pVariant)
+	return o
+}
+
+func tVariantConstraint(v byte, b byte, o UUID, t *testing.T) {
+	output(o)
+	switch v {
+	case ReservedNCS:
+		switch b {
+		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07:
+			outputF(": %X ", b)
+		default:
+			t.Errorf("high bits %X do not resolve to 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07", b)
+		}
+	case ReservedRFC4122:
+		switch b {
+		case 0x08, 0x09, 0x0A, 0x0B:
+			outputF(": %X ", b)
+		default:
+			t.Errorf("high bits %X do not resolve to 0x08, 0x09, 0x0A, 0x0B", b)
+		}
+	case ReservedMicrosoft:
+		switch b {
+		case 0x0C, 0x0D:
+			outputF(": %X ", b)
+		default:
+			t.Errorf("high bits %X do not resolve to 0x0C, 0x0D", b)
+		}
+	case ReservedFuture:
+		switch b {
+		case 0x0E, 0x0F:
+			outputF(": %X ", b)
+		default:
+			t.Errorf("high bits %X do not resolve to 0x0E, 0x0F", b)
+		}
+	}
+	output("\n")
+}
+
+func output(a ...interface{}) {
+	if printer {
+		fmt.Print(a...)
+	}
+}
+
+func outputLn(a ...interface{}) {
+	if printer {
+		fmt.Println(a...)
+	}
+}
+
+func outputF(format string, a ...interface{}) {
+	if printer {
+		fmt.Printf(format, a...)
+	}
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 000000000..3f90b7300
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. 
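+//
+// For example (sketch): with numBuckets = 6 and a 10-second resolution, a
+// level spans one minute of observations; the accompanying tests use exactly
+// this layout to bound the approximation error.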
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
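+		// (In that case t is at least numBuckets*size past level.end,
+		// so none of the existing buckets can still fall in the window.)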
+		if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+			for _, b := range level.buckets {
+				ts.resetObservation(b)
+			}
+			level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+		}
+
+		for t.After(level.end) {
+			level.end = level.end.Add(level.size)
+			level.newest = level.oldest
+			level.oldest = (level.oldest + 1) % ts.numBuckets
+			ts.resetObservation(level.buckets[level.newest])
+		}
+
+		t = level.end
+	}
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+	now := ts.clock.Time()
+	if ts.levels[0].end.Before(now) {
+		ts.advance(now)
+	}
+
+	ts.mergePendingUpdates()
+
+	result := ts.provider()
+	l := ts.levels[level]
+	index := l.newest
+
+	for i := 0; i < num; i++ {
+		if l.buckets[index] != nil {
+			result.Add(l.buckets[index])
+		}
+		if index == 0 {
+			index = ts.numBuckets
+		}
+		index--
+	}
+
+	return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+	if level < 0 || level >= len(ts.levels) {
+		log.Print("timeseries: bad level argument: ", level)
+		return nil
+	}
+	if num < 0 || num >= ts.numBuckets {
+		log.Print("timeseries: bad num argument: ", num)
+		return nil
+	}
+
+	results := make([]Observable, num)
+	now := ts.clock.Time()
+	if ts.levels[0].end.Before(now) {
+		ts.advance(now)
+	}
+
+	ts.mergePendingUpdates()
+
+	l := ts.levels[level]
+	index := l.newest
+
+	for i := 0; i < num; i++ {
+		result := ts.provider()
+		results[i] = result
+		if l.buckets[index] != nil {
+			result.CopyFrom(l.buckets[index])
+		}
+
+		if index == 0 {
+			index = ts.numBuckets
+		}
+		index--
+	}
+	return results
+}
+
+// ScaleBy updates observations by scaling by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+	for _, l := range ts.levels {
+		for i := 0; i < ts.numBuckets; i++ {
+			// buckets are allocated lazily; skip any that are still nil
+			if l.buckets[i] != nil {
+				l.buckets[i].Multiply(factor)
+			}
+		}
+	}
+
+	ts.total.Multiply(factor)
+	ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If start or finish times don't fall on bucket boundaries of the same
+// level, then return values are approximate answers.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+	return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+	now := ts.clock.Time()
+	return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+	ts.mergePendingUpdates()
+	return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
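+//
+// For example (sketch): ComputeRange(t, t.Add(time.Minute), 6) answered from
+// a level with 10-second buckets yields six exact 10-second sums; if the
+// range does not line up with bucket boundaries, extract below apportions
+// fractions of the overlapping buckets instead.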
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+	if start.After(finish) {
+		log.Printf("timeseries: start > finish, %v>%v", start, finish)
+		return nil
+	}
+
+	if num < 0 {
+		log.Printf("timeseries: num < 0, %v", num)
+		return nil
+	}
+
+	results := make([]Observable, num)
+
+	for _, l := range ts.levels {
+		if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+			ts.extract(l, start, finish, num, results)
+			return results
+		}
+	}
+
+	// Failed to find a level that covers the desired range. So just
+	// extract from the last level, even if it doesn't cover the entire
+	// desired range.
+	ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+	return results
+}
+
+// RecentList returns the specified number of values in slice over the most
+// recent time period of the specified range.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+	if delta < 0 {
+		return nil
+	}
+	now := ts.clock.Time()
+	return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract returns a slice of specified number of observations from a given
+// level over a given range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+	ts.mergePendingUpdates()
+
+	srcInterval := l.size
+	dstInterval := finish.Sub(start) / time.Duration(num)
+	dstStart := start
+	srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+	srcIndex := 0
+
+	// Where should scanning start?
+	if dstStart.After(srcStart) {
+		advance := dstStart.Sub(srcStart) / srcInterval
+		srcIndex += int(advance)
+		srcStart = srcStart.Add(advance * srcInterval)
+	}
+
+	// The i'th value is computed as shown below.
+	//   interval = (finish - start) / num
+	//   i'th value = sum of observation in range
+	//     [ start + i * interval,
+	//       start + (i + 1) * interval )
+	for i := 0; i < num; i++ {
+		results[i] = ts.resetObservation(results[i])
+		dstEnd := dstStart.Add(dstInterval)
+		for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+			srcEnd := srcStart.Add(srcInterval)
+			if srcEnd.After(ts.lastAdd) {
+				srcEnd = ts.lastAdd
+			}
+
+			if !srcEnd.Before(dstStart) {
+				srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+				if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+					// dst completely contains src.
+					if srcValue != nil {
+						results[i].Add(srcValue)
+					}
+				} else {
+					// dst partially overlaps src.
+					overlapStart := maxTime(srcStart, dstStart)
+					overlapEnd := minTime(srcEnd, dstEnd)
+					base := srcEnd.Sub(srcStart)
+					fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+					used := ts.provider()
+					if srcValue != nil {
+						used.CopyFrom(srcValue)
+					}
+					used.Multiply(fraction)
+					results[i].Add(used)
+				}
+
+				if srcEnd.After(dstEnd) {
+					break
+				}
+			}
+			srcIndex++
+			srcStart = srcStart.Add(srcInterval)
+		}
+		dstStart = dstStart.Add(dstInterval)
+	}
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+	if observation == nil {
+		observation = ts.provider()
+	} else {
+		observation.Clear()
+	}
+	return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+	timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
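+//
+// Typical use (sketch, editor addition):
+//	ts := NewTimeSeries(NewFloat)
+//	f := Float(1)
+//	ts.Add(&f)                       // record one observation now
+//	recent := ts.Recent(time.Minute) // aggregate over the last minute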
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries_test.go b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries_test.go new file mode 100644 index 000000000..66325a912 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries_test.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package timeseries + +import ( + "math" + "testing" + "time" +) + +func isNear(x *Float, y float64, tolerance float64) bool { + return math.Abs(x.Value()-y) < tolerance +} + +func isApproximate(x *Float, y float64) bool { + return isNear(x, y, 1e-2) +} + +func checkApproximate(t *testing.T, o Observable, y float64) { + x := o.(*Float) + if !isApproximate(x, y) { + t.Errorf("Wanted %g, got %g", y, x.Value()) + } +} + +func checkNear(t *testing.T, o Observable, y, tolerance float64) { + x := o.(*Float) + if !isNear(x, y, tolerance) { + t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) + } +} + +var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) + +func tu(s int64) time.Time { + return baseTime.Add(time.Duration(s) * time.Second) +} + +func tu2(s int64, ns int64) time.Time { + return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) +} + +func TestBasicTimeSeries(t *testing.T) { + ts := NewTimeSeries(NewFloat) + fo := new(Float) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(1)), 40) + checkApproximate(t, ts.Total(), 40) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 40) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 70) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 60) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 90) + *fo = Float(100) + ts.AddWithTime(fo, tu(100)) + checkApproximate(t, ts.Range(tu(99), tu(100)), 100) + checkApproximate(t, ts.Range(tu(0), tu(4)), 36) + checkApproximate(t, ts.Total(), 190) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(4)), 44) + checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Total(), 210) + + for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { + if i == 63 { + checkApproximate(t, l, 100) + } else { + checkApproximate(t, l, 0) + } + } + + checkApproximate(t, ts.Range(tu(0), tu(100)), 210) + checkApproximate(t, ts.Range(tu(10), tu(100)), 100) + + for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { + if i < 10 { + checkApproximate(t, l, 11) + } else if i >= 90 { + checkApproximate(t, l, 10) + } else { + checkApproximate(t, l, 0) + } + } +} + +func TestFloat(t *testing.T) { + f := Float(1) + if g, w := f.String(), "1"; g != w { + t.Errorf("Float(1).String = %q; want %q", g, w) + } + f2 := Float(2) + var o Observable = &f2 + f.Add(o) + if g, w := f.Value(), 3.0; g != w { + t.Errorf("Float post-add = %v; want %v", g, w) + } + f.Multiply(2) + if g, w := f.Value(), 6.0; g != w { + t.Errorf("Float post-multiply = %v; want %v", g, w) + } + f.Clear() + if g, w := f.Value(), 0.0; g != w { + t.Errorf("Float post-clear = %v; want %v", g, w) + } + f.CopyFrom(&f2) + if g, w := f.Value(), 2.0; g != w { + t.Errorf("Float post-CopyFrom = %v; want %v", g, w) + } +} + +type mockClock struct { + time time.Time +} + +func (m *mockClock) Time() time.Time { return m.time } +func (m *mockClock) Set(t time.Time) { m.time = t } + +const buckets = 6 + +var testResolutions = []time.Duration{ + 10 * time.Second, // level holds one minute of observations + 100 * 
time.Second, // level holds ten minutes of observations + 10 * time.Minute, // level holds one hour of observations +} + +// TestTimeSeries uses a small number of buckets to force a higher +// error rate on approximations from the timeseries. +type TestTimeSeries struct { + timeSeries +} + +func TestExpectedErrorRate(t *testing.T) { + ts := new(TestTimeSeries) + fake := new(mockClock) + fake.Set(time.Now()) + ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) + for i := 1; i <= 61*61; i++ { + fake.Set(fake.Time().Add(1 * time.Second)) + ob := Float(1) + ts.AddWithTime(&ob, fake.Time()) + + // The results should be accurate within one missing bucket (1/6) of the observations recorded. + checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) + checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) + checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) + } +} + +func min(a, b float64) float64 { + if a < b { + return a + } + return b +} diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/events.go b/Godeps/_workspace/src/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..e66c7e328 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/events.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, +}).Parse(eventsHTML)) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl.Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
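+// (eventLogs implements sort.Interface: Less orders by Start descending, so
+// sort.Sort, as used in RenderEvents above, yields newest-first output.)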
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+		el.events[0].When = el.events[1].When
+		copy(el.events[1:], el.events[2:])
+		el.events[maxEventsPerLog-1] = e
+	}
+	if e.IsErr {
+		el.LastErrorTime = e.When
+	}
+	el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+	atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+	if atomic.AddInt32(&el.refs, -1) == 0 {
+		freeEventLog(el)
+	}
+}
+
+func (el *eventLog) When() string {
+	return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+	elapsed := time.Since(el.Start)
+	return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+	buf := new(bytes.Buffer)
+	tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+	printStackRecord(tw, el.stack)
+	tw.Flush()
+	return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+	for _, pc := range stk {
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			continue
+		}
+		file, line := f.FileLine(pc)
+		name := f.Name()
+		// Hide runtime.goexit and any runtime functions at the beginning.
+		if strings.HasPrefix(name, "runtime.") {
+			continue
+		}
+		fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+	}
+}
+
+func (el *eventLog) Events() []logEntry {
+	el.mu.RLock()
+	defer el.mu.RUnlock()
+	return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+	select {
+	case el := <-freeEventLogs:
+		return el
+	default:
+		return new(eventLog)
+	}
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+const eventsHTML = `
+<html>
+	<head>
+		<title>events</title>
+	</head>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td>{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td>
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..bb42aa532 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go @@ -0,0 +1,356 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
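+// In newData below, each bucket's bar is scaled so that the fullest bucket
+// spans exactly maxHTMLBarWidth pixels.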
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl.Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +// Input: data +var distTmpl = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+<table>
+<tr>
+	<td style="padding:0.25em">Count: {{.Count}}</td>
+	<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+	<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+	<td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+<tr>
+	<td style="text-align:right;padding:0 0.25em">[{{.Lower}},</td>
+	<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+	<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+	<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+	<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+	<td><div style="background-color:blue;height:1em;width:{{.GraphWidth}}px"></div></td>
+</tr>
+{{end}}
+{{end}}
+</table>
+`)) diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/histogram_test.go b/Godeps/_workspace/src/golang.org/x/net/trace/histogram_test.go new file mode 100644 index 000000000..d384b9332 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/histogram_test.go @@ -0,0 +1,325 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math" + "testing" +) + +type sumTest struct { + value int64 + sum int64 + sumOfSquares float64 + total int64 +} + +var sumTests = []sumTest{ + {100, 100, 10000, 1}, + {50, 150, 12500, 2}, + {50, 200, 15000, 3}, + {50, 250, 17500, 4}, +} + +type bucketingTest struct { + in int64 + log int + bucket int +} + +var bucketingTests = []bucketingTest{ + {0, 0, 0}, + {1, 1, 0}, + {2, 2, 1}, + {3, 2, 1}, + {4, 3, 2}, + {1000, 10, 9}, + {1023, 10, 9}, + {1024, 11, 10}, + {1000000, 20, 19}, +} + +type multiplyTest struct { + in int64 + ratio float64 + expectedSum int64 + expectedTotal int64 + expectedSumOfSquares float64 +} + +var multiplyTests = []multiplyTest{ + {15, 2.5, 37, 2, 562.5}, + {128, 4.6, 758, 13, 77953.9}, +} + +type percentileTest struct { + fraction float64 + expected int64 +} + +var percentileTests = []percentileTest{ + {0.25, 48}, + {0.5, 96}, + {0.6, 109}, + {0.75, 128}, + {0.90, 205}, + {0.95, 230}, + {0.99, 256}, +} + +func TestSum(t *testing.T) { + var h histogram + + for _, test := range sumTests { + h.addMeasurement(test.value) + sum := h.sum + if sum != test.sum { + t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) + } + + sumOfSquares := h.sumOfSquares + if sumOfSquares != test.sumOfSquares { + t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) + } + + total := h.total() + if total != test.total { + t.Errorf("h.Total = %v WANT: %v", total, test.total) + } + } +} + +func TestMultiply(t *testing.T) { + var h histogram + for i, test := range multiplyTests { + h.addMeasurement(test.in) + h.Multiply(test.ratio) + if h.sum != test.expectedSum { + t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) + } + if h.total() != test.expectedTotal { + t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) + } + if h.sumOfSquares != test.expectedSumOfSquares { + t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) + } + } +} + +func TestBucketingFunctions(t *testing.T) { + for _, test := range bucketingTests { + log := log2(test.in) + if log != test.log { + t.Errorf("log2 = %v WANT: %v", log, test.log) + } + + bucket := getBucket(test.in) + if bucket != test.bucket { + t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) + } + } +} + +func TestAverage(t *testing.T) { + a := new(histogram) + average := a.average() + if average != 0 { + t.Errorf("Average of empty histogram was %v WANT: 0", average) + } + + a.addMeasurement(1) + a.addMeasurement(1) + a.addMeasurement(3) + const expected = float64(5) / float64(3) + average = a.average() + + if !isApproximate(average, expected) { + t.Errorf("Average = %g WANT: %v", average, expected) + } +} + +func TestStandardDeviation(t *testing.T) { + a := new(histogram) + add(a, 10, 1<<4) + add(a, 10, 1<<5) + add(a, 10, 1<<6) + stdDev := a.standardDeviation() + const expected = 19.95 + + if !isApproximate(stdDev, expected) { + t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) + } + + // No values + a = new(histogram) + stdDev = a.standardDeviation() + + if 
!isApproximate(stdDev, 0) {
+		t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
+	}
+
+	add(a, 1, 1<<4)
+	stdDev = a.standardDeviation()
+	if !isApproximate(stdDev, 0) {
+		t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
+	}
+
+	add(a, 10, 1<<4)
+	stdDev = a.standardDeviation()
+	if !isApproximate(stdDev, 0) {
+		t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
+	}
+}
+
+func TestPercentileBoundary(t *testing.T) {
+	a := new(histogram)
+	add(a, 5, 1<<4)
+	add(a, 10, 1<<6)
+	add(a, 5, 1<<7)
+
+	for _, test := range percentileTests {
+		percentile := a.percentileBoundary(test.fraction)
+		if percentile != test.expected {
+			t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected)
+		}
+	}
+}
+
+func TestCopyFrom(t *testing.T) {
+	a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+		19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
+	b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+		20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1}
+
+	a.CopyFrom(&b)
+
+	if a.String() != b.String() {
+		t.Errorf("a.String = %s WANT: %s", a.String(), b.String())
+	}
+}
+
+func TestClear(t *testing.T) {
+	a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+		19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
+
+	a.Clear()
+
+	expected := "0, 0.000000, 0, 0, []"
+	if a.String() != expected {
+		t.Errorf("a.String = %s WANT %s", a.String(), expected)
+	}
+}
+
+func TestNew(t *testing.T) {
+	a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+		19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
+	b := a.New()
+
+	expected := "0, 0.000000, 0, 0, []"
+	if b.(*histogram).String() != expected {
+		t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected)
+	}
+}
+
+func TestAdd(t *testing.T) {
+	// The tests here depend on the associativity of addMeasurement and Add.
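+	// Concretely: if histograms f and g were built from measurement multisets
+	// F and G, then after f.Add(g) the result must match a histogram fed every
+	// element of F and G through addMeasurement directly. Each block below
+	// constructs both sides of that identity and compares their String() forms.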
+ // Add empty observation + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := a.String() + a.Add(b) + if a.String() != expected { + t.Errorf("a.String = %s WANT: %s", a.String(), expected) + } + + // Add same bucketed value, no new buckets + c := new(histogram) + d := new(histogram) + e := new(histogram) + c.addMeasurement(12) + d.addMeasurement(11) + e.addMeasurement(12) + e.addMeasurement(11) + c.Add(d) + if c.String() != e.String() { + t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) + } + + // Add bucketed values + f := new(histogram) + g := new(histogram) + h := new(histogram) + f.addMeasurement(4) + f.addMeasurement(12) + f.addMeasurement(100) + g.addMeasurement(18) + g.addMeasurement(36) + g.addMeasurement(255) + h.addMeasurement(4) + h.addMeasurement(12) + h.addMeasurement(100) + h.addMeasurement(18) + h.addMeasurement(36) + h.addMeasurement(255) + f.Add(g) + if f.String() != h.String() { + t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) + } + + // add buckets to no buckets + i := new(histogram) + j := new(histogram) + k := new(histogram) + j.addMeasurement(18) + j.addMeasurement(36) + j.addMeasurement(255) + k.addMeasurement(18) + k.addMeasurement(36) + k.addMeasurement(255) + i.Add(j) + if i.String() != k.String() { + t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) + } + + // add buckets to single value (no overlap) + l := new(histogram) + m := new(histogram) + n := new(histogram) + l.addMeasurement(0) + m.addMeasurement(18) + m.addMeasurement(36) + m.addMeasurement(255) + n.addMeasurement(0) + n.addMeasurement(18) + n.addMeasurement(36) + n.addMeasurement(255) + l.Add(m) + if l.String() != n.String() { + t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) + } + + // mixed order + o := new(histogram) + p := new(histogram) + o.addMeasurement(0) + o.addMeasurement(2) + o.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(2) + if o.String() != p.String() { + t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) + } +} + +func add(h *histogram, times int, val int64) { + for i := 0; i < times; i++ { + h.addMeasurement(val) + } +} + +func isApproximate(x, y float64) bool { + return math.Abs(x-y) < 1e-2 +} diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/trace.go b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..8092e2ec6 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go @@ -0,0 +1,1056 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. 
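+
+As a minimal sketch of the wiring (the handler path and listen address here
+are illustrative only), a program needs nothing beyond creating traces and
+serving http.DefaultServeMux, since this package registers /debug/requests
+and /debug/events in its init function:
+
+	package main
+
+	import (
+		"log"
+		"net/http"
+
+		"golang.org/x/net/trace"
+	)
+
+	func main() {
+		http.HandleFunc("/work", func(w http.ResponseWriter, req *http.Request) {
+			tr := trace.New("main.Work", req.URL.Path)
+			defer tr.Finish()
+			tr.LazyPrintf("handled %s", req.URL.Path)
+			w.Write([]byte("done"))
+		})
+		log.Fatal(http.ListenAndServe("localhost:8080", nil))
+	}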
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+	// A Fetcher fetches URL paths for a single domain.
+	type Fetcher struct {
+		domain string
+		events trace.EventLog
+	}
+
+	func NewFetcher(domain string) *Fetcher {
+		return &Fetcher{
+			domain,
+			trace.NewEventLog("mypkg.Fetcher", domain),
+		}
+	}
+
+	func (f *Fetcher) Fetch(path string) (string, error) {
+		resp, err := http.Get("http://" + f.domain + "/" + path)
+		if err != nil {
+			f.events.Errorf("Get(%q) = %v", path, err)
+			return "", err
+		}
+		f.events.Printf("Get(%q) = %s", path, resp.Status)
+		...
+	}
+
+	func (f *Fetcher) Close() error {
+		f.events.Finish()
+		return nil
+	}
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"runtime"
+	"sort"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customise its authorisation requirements.
+//
+// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+	host, _, err := net.SplitHostPort(req.RemoteAddr)
+	switch {
+	case err != nil: // Badly formed address; fail closed.
+		return false, false
+	case host == "localhost" || host == "127.0.0.1" || host == "::1":
+		return true, true
+	default:
+		return false, false
+	}
+}
+
+func init() {
+	http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
+		any, sensitive := AuthRequest(req)
+		if !any {
+			http.Error(w, "not allowed", http.StatusUnauthorized)
+			return
+		}
+		Render(w, req, sensitive)
+	})
+	http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+		any, sensitive := AuthRequest(req)
+		if !any {
+			http.Error(w, "not allowed", http.StatusUnauthorized)
+			return
+		}
+		RenderEvents(w, req, sensitive)
+	})
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+	data := &struct {
+		Families         []string
+		ActiveTraceCount map[string]int
+		CompletedTraces  map[string]*family
+
+		// Set when a bucket has been selected.
+		Traces        traceList
+		Family        string
+		Bucket        int
+		Expanded      bool
+		Traced        bool
+		Active        bool
+		ShowSensitive bool // whether to show sensitive events
+
+		Histogram       template.HTML
+		HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+		// If non-zero, the set of traces is a partial set,
+		// and this is the total number.
+ Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam, _ := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. 
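+	// For example, formatting can be kept off the request's hot path by
+	// passing a value that implements fmt.Stringer; queryEvent below is a
+	// hypothetical caller-defined type, not part of this package:
+	//
+	//	type queryEvent struct{ rows int }
+	//
+	//	func (q queryEvent) String() string { return fmt.Sprintf("%d rows", q.rows) }
+	//
+	//	tr.LazyLog(queryEvent{rows: n}, false)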
+ LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.events = make([]event, 0, maxEventsPerTrace) + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. 
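+	// Both maps follow the same pattern: check under the read lock, then
+	// re-check after upgrading to the write lock (see New and allocFamily),
+	// keeping the per-request fast path cheap while avoiding duplicate
+	// insertions from racing goroutines.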
+ completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. 
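+// The buffer is a fixed-capacity ring: start indexes the oldest entry and
+// length counts live entries, so with tracesPerBucket == 10 a bucket holding
+// entries at indexes 8, 9 and 0 has start == 8 and length == 3, and the next
+// Add writes at (start+length) mod 10 == 1, evicting the oldest entry once
+// the ring is full.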
+type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + What interface{} // string or fmt.Stringer + Sensitive bool // whether this event contains sensitive information +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. 
+ Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). + mu sync.RWMutex + events []event + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a requestz.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < cap(tr.events) { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((cap(tr.events) - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[cap(tr.events)-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. 
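+	// (Once full, addEvent collapses the middle of the slice into a single
+	// *discarded placeholder, so the useful minimum is first event +
+	// placeholder + most recent event; hence the m > 3 guard below.)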
+ if len(tr.events) == 0 && m > 3 { + tr.events = make([]event, 0, m) + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, +}).Parse(pageHTML)) + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/trace_test.go b/Godeps/_workspace/src/golang.org/x/net/trace/trace_test.go new file mode 100644 index 000000000..c2f5fcbaa --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/trace_test.go @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "reflect" + "testing" +) + +type s struct{} + +func (s) String() string { return "lazy string" } + +// TestReset checks whether all the fields are zeroed after reset. +func TestReset(t *testing.T) { + tr := New("foo", "bar") + tr.LazyLog(s{}, false) + tr.LazyPrintf("%d", 1) + tr.SetRecycler(func(_ interface{}) {}) + tr.SetTraceInfo(3, 4) + tr.SetMaxEvents(100) + tr.SetError() + tr.Finish() + + tr.(*trace).reset() + + if !reflect.DeepEqual(tr, new(trace)) { + t.Errorf("reset didn't clear all fields: %+v", tr) + } +} + +// TestResetLog checks whether all the fields are zeroed after reset. +func TestResetLog(t *testing.T) { + el := NewEventLog("foo", "bar") + el.Printf("message") + el.Errorf("error") + el.Finish() + + el.(*eventLog).reset() + + if !reflect.DeepEqual(el, new(eventLog)) { + t.Errorf("reset didn't clear all fields: %+v", el) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml new file mode 100644 index 000000000..a035125c3 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 000000000..46aa2b12d --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. 
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
new file mode 100644
index 000000000..d02f24fd5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/README.md b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
new file mode 100644
index 000000000..0d5141733
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package.
(You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as on
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+	import (
+		"net/http"
+
+		"golang.org/x/net/context"
+		"golang.org/x/oauth2"
+		"golang.org/x/oauth2/google"
+		newappengine "google.golang.org/appengine"
+		newurlfetch "google.golang.org/appengine/urlfetch"
+
+		"appengine"
+	)
+
+	func handler(w http.ResponseWriter, r *http.Request) {
+		var c appengine.Context = appengine.NewContext(r)
+		c.Infof("Logging a message with the old package")
+
+		var ctx context.Context = newappengine.NewContext(r)
+		client := &http.Client{
+			Transport: &oauth2.Transport{
+				Source: google.AppEngineTokenSource(ctx, "scope"),
+				Base:   &newurlfetch.Transport{Context: ctx},
+			},
+		}
+		client.Get("...")
+	}
+
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 000000000..52f8d1dd6
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+	return urlfetch.Client(ctx), nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go
new file mode 100644
index 000000000..452fb8c12
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go
@@ -0,0 +1,112 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package clientcredentials implements the OAuth 2.0 "client credentials" token flow,
+// also known as "two-legged OAuth 2.0".
+//
+// This should be used when the client is acting on its own behalf or when the client
+// is the resource owner. It may also be used when requesting access to protected
+// resources based on an authorization previously arranged with the authorization
+// server.
+//
+// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4
+package clientcredentials
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+)
+
+// tokenFromInternal maps an *internal.Token struct into
+// an *oauth2.Token struct.
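+//
+// For orientation, typical use of this package goes through the exported
+// Config below; in a sketch (the token URL, credentials and API URL are
+// placeholders, not real endpoints):
+//
+//	conf := &clientcredentials.Config{
+//		ClientID:     "CLIENT_ID",
+//		ClientSecret: "CLIENT_SECRET",
+//		TokenURL:     "https://provider.example.com/token",
+//		Scopes:       []string{"scope1"},
+//	}
+//	client := conf.Client(oauth2.NoContext)
+//	resp, err := client.Get("https://api.example.com/resource")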
+func tokenFromInternal(t *internal.Token) *oauth2.Token {
+	if t == nil {
+		return nil
+	}
+	tk := &oauth2.Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+	}
+	return tk.WithExtra(t.Raw)
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is
+// returned along with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
+
+// Config describes a 2-legged ("client credentials") OAuth2 flow, with both
+// the client application information and the server's endpoint URLs.
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// TokenURL is the resource server's token endpoint
+	// URL. This is a constant specific to each server.
+	TokenURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// Token uses client credentials to retrieve a token.
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type": {"client_credentials"},
+		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Client returns an HTTP client using tokens obtained from the Config.
+// The tokens will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// TokenSource returns a TokenSource that retrieves tokens via the client
+// credentials flow, automatically refreshing them as necessary using the
+// provided context and the client ID and client secret.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	source := &tokenSource{
+		ctx:  ctx,
+		conf: c,
+	}
+	return oauth2.ReuseTokenSource(nil, source)
+}
+
+type tokenSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+// Token refreshes the token by using a new client credentials request.
+// Tokens received this way do not include a refresh token.
+func (c *tokenSource) Token() (*oauth2.Token, error) {
+	return retrieveToken(c.ctx, c.conf, url.Values{
+		"grant_type": {"client_credentials"},
+		"scope":      internal.CondVal(strings.Join(c.conf.Scopes, " ")),
+	})
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
new file mode 100644
index 000000000..ab319e082
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
@@ -0,0 +1,96 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package clientcredentials + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "golang.org/x/oauth2" +) + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + Scopes: []string{"scope1", "scope2"}, + TokenURL: url + "/token", + } +} + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func TestTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token") + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want { + t.Errorf("Content-Type header = %q; want %q", got, want) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + r.Body.Close() + } + if err != nil { + t.Errorf("failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("payload = %q; want %q", string(body), "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2") + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Token(oauth2.NoContext) + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("token invalid. got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c") + } + if tok.TokenType != "bearer" { + t.Errorf("token type = %q; want %q", tok.TokenType, "bearer") + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(oauth2.NoContext) + c.Get(ts.URL + "/somethingelse") +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go new file mode 100644 index 000000000..8be278855 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go @@ -0,0 +1,45 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package oauth2_test + +import ( + "fmt" + "log" + + "golang.org/x/oauth2" +) + +func ExampleConfig() { + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Scopes: []string{"SCOPE1", "SCOPE2"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://provider.com/o/oauth2/auth", + TokenURL: "https://provider.com/o/oauth2/token", + }, + } + + // Redirect user to consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Use the authorization code that is pushed to the redirect URL. + // NewTransportWithCode will do the handshake to retrieve + // an access token and initiate a Transport that is + // authorized and authenticated by the retrieved token. + var code string + if _, err := fmt.Scan(&code); err != nil { + log.Fatal(err) + } + tok, err := conf.Exchange(oauth2.NoContext, code) + if err != nil { + log.Fatal(err) + } + + client := conf.Client(oauth2.NoContext, tok) + client.Get("...") +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go new file mode 100644 index 000000000..9c816ff80 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facebook provides constants for using OAuth2 to access Facebook. +package facebook + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Facebook's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.facebook.com/dialog/oauth", + TokenURL: "https://graph.facebook.com/oauth/access_token", +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go new file mode 100644 index 000000000..82ca623dd --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go @@ -0,0 +1,16 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package github provides constants for using OAuth2 to access Github. +package github + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Github's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://github.com/login/oauth/authorize", + TokenURL: "https://github.com/login/oauth/access_token", +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 000000000..65dc34731 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,83 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. 
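+// Identical scope sets share a cached token: the scopes are copied, sorted
+// and joined with spaces to form the cache key, so for example
+// AppEngineTokenSource(ctx, "b", "a") and AppEngineTokenSource(ctx, "a", "b")
+// reuse the same token until it expires (see aeTokens below).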
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 000000000..362766fdd --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,13 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..78f808985 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +// +// This client should be used when developing services +// that run on Google App Engine or Google Compute Engine +// and use "Application Default Credentials." +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) 
+ if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource is a token source that uses +// "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine, it fetches credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + ts, err := tokenSourceFromFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return ts, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + _, err := os.Stat(filename) + if err == nil { + ts, err2 := tokenSourceFromFile(ctx, filename, scope) + if err2 == nil { + return ts, nil + } + err = err2 + } else if os.IsNotExist(err) { + err = nil // ignore this error + } + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil { + return AppEngineTokenSource(ctx, scope...), nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + return ComputeTokenSource(""), nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var d struct { + // Common fields + Type string + ClientID string `json:"client_id"` + + // User Credential fields + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(b, &d); err != nil { + return nil, err + } + switch d.Type { + case "authorized_user": + cfg := &oauth2.Config{ + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + Scopes: append([]string{}, scopes...), // copy + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: d.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "service_account": + cfg := &jwt.Config{ + Email: d.ClientEmail, + PrivateKey: []byte(d.PrivateKey), + Scopes: append([]string{}, scopes...), // copy + TokenURL: JWTTokenURL, + } + return cfg.TokenSource(ctx), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", d.Type) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go new file mode 100644 index 000000000..17262802a --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go @@ -0,0 +1,150 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm !appengine + +package google_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/appengine" + "google.golang.org/appengine/urlfetch" +) + +func ExampleDefaultClient() { + client, err := google.DefaultClient(oauth2.NoContext, + "https://www.googleapis.com/auth/devstorage.full_control") + if err != nil { + log.Fatal(err) + } + client.Get("...") +} + +func Example_webServer() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + RedirectURL: "YOUR_REDIRECT_URL", + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + Endpoint: google.Endpoint, + } + // Redirect user to Google's consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state") + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Handle the exchange code to initiate a transport. 
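+	// (In a real web server the one-time code arrives on the redirect URL,
+	// e.g. as r.FormValue("code"); the literal "authorization-code" below is
+	// a stand-in for it.)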
+ tok, err := conf.Exchange(oauth2.NoContext, "authorization-code") + if err != nil { + log.Fatal(err) + } + client := conf.Client(oauth2.NoContext, tok) + client.Get("...") +} + +func ExampleJWTConfigFromJSON() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + // Navigate to your project, then see the "Credentials" page + // under "APIs & Auth". + // To create a service account client, click "Create new Client ID", + // select "Service Account", and click "Create Client ID". A JSON + // key file will then be downloaded to your computer. + data, err := ioutil.ReadFile("/path/to/your-project-key.json") + if err != nil { + log.Fatal(err) + } + conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of + // your service account. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleSDKConfig() { + // The credentials will be obtained from the first account that + // has been authorized with `gcloud auth login`. + conf, err := google.NewSDKConfig("") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of the SDK user. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func Example_serviceAccount() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &jwt.Config{ + Email: "xxx@developer.gserviceaccount.com", + // The contents of your RSA private key or your PEM file + // that contains a private key. + // If you have a p12 file instead, you + // can use `openssl` to export the private key into a pem file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // The field only supports PEM containers with no passphrase. + // The openssl command will convert p12 keys to passphrase-less PEM containers. + PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + TokenURL: google.JWTTokenURL, + // If you would like to impersonate a user, you can + // create a transport with a subject. The following GET + // request will be made on the behalf of user@example.com. + // Optional. + Subject: "user@example.com", + } + // Initiate an http.Client, the following GET request will be + // authorized and authenticated on the behalf of user@example.com. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleAppEngineTokenSource() { + var req *http.Request // from the ServeHTTP handler + ctx := appengine.NewContext(req) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"), + Base: &urlfetch.Transport{ + Context: ctx, + }, + }, + } + client.Get("...") +} + +func ExampleComputeTokenSource() { + client := &http.Client{ + Transport: &oauth2.Transport{ + // Fetch from Google Compute Engine's metadata server to retrieve + // an access token for the provided account. + // If no account is specified, "default" is used. 
+ Source: google.ComputeTokenSource(""),
+ },
+ }
+ client.Get("...")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
new file mode 100644
index 000000000..2077d9866
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from https://console.developers.google.com,
+// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
+// JSON format and provide the contents of the file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+ type cred struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RedirectURIs []string `json:"redirect_uris"`
+ AuthURI string `json:"auth_uri"`
+ TokenURI string `json:"token_uri"`
+ }
+ var j struct {
+ Web *cred `json:"web"`
+ Installed *cred `json:"installed"`
+ }
+ if err := json.Unmarshal(jsonKey, &j); err != nil {
+ return nil, err
+ }
+ var c *cred
+ switch {
+ case j.Web != nil:
+ c = j.Web
+ case j.Installed != nil:
+ c = j.Installed
+ default:
+ return nil, fmt.Errorf("oauth2/google: no credentials found")
+ }
+ if len(c.RedirectURIs) < 1 {
+ return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+ }
+ return &oauth2.Config{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RedirectURL: c.RedirectURIs[0],
+ Scopes: scope,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: c.AuthURI,
+ TokenURL: c.TokenURI,
+ },
+ }, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" page under "APIs & Auth" for your
+// project at https://console.developers.google.com to download a JSON key file.
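Before the service-account flow below, a quick sketch of ConfigFromJSON above, which dispatches on whether the JSON carries a top-level "web" or "installed" key (all key contents here are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// Placeholder "installed application" credentials; the same shape
	// under a top-level "web" key is accepted as well.
	jsonKey := []byte(`{
	 "installed": {
	  "client_id": "CLIENT_ID.apps.googleusercontent.com",
	  "client_secret": "CLIENT_SECRET",
	  "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob"],
	  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
	  "token_uri": "https://accounts.google.com/o/oauth2/token"
	 }
	}`)
	conf, err := google.ConfigFromJSON(jsonKey, "https://www.googleapis.com/auth/drive.readonly")
	if err != nil {
		log.Fatal(err)
	}
	// The first entry of redirect_uris becomes conf.RedirectURL.
	fmt.Println(conf.AuthCodeURL("state"))
}
```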
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var key struct { + Email string `json:"client_email"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(jsonKey, &key); err != nil { + return nil, err + } + return &jwt.Config{ + Email: key.Email, + PrivateKey: []byte(key.PrivateKey), + Scopes: scope, + TokenURL: JWTTokenURL, + }, nil +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go new file mode 100644 index 000000000..4cc01884b --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go @@ -0,0 +1,67 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
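computeSource.Token above trusts the metadata server's small JSON payload; a sketch of the same decode-and-wrap step with an illustrative payload, so it runs without a GCE instance:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Illustrative payload; the real one is fetched from
	// "instance/service-accounts/<account>/token" on the metadata server.
	payload := `{"access_token":"ACCESS_TOKEN","expires_in":3600,"token_type":"Bearer"}`
	var res struct {
		AccessToken  string `json:"access_token"`
		ExpiresInSec int    `json:"expires_in"`
		TokenType    string `json:"token_type"`
	}
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&res); err != nil {
		log.Fatal(err)
	}
	tok := &oauth2.Token{
		AccessToken: res.AccessToken,
		TokenType:   res.TokenType,
		Expiry:      time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
	}
	fmt.Println(tok.Valid()) // true until the hour is up
}
```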
+
+package google
+
+import (
+ "strings"
+ "testing"
+)
+
+var webJSONKey = []byte(`
+{
+ "web": {
+ "auth_uri": "https://google.com/o/oauth2/auth",
+ "client_secret": "3Oknc4jS_wA2r9i",
+ "token_uri": "https://google.com/o/oauth2/token",
+ "client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+ "redirect_uris": ["https://www.example.com/oauth2callback"],
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+ "client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "javascript_origins": ["https://www.example.com"]
+ }
+}`)
+
+var installedJSONKey = []byte(`{
+ "installed": {
+ "client_id": "222-installed.apps.googleusercontent.com",
+ "redirect_uris": ["https://www.example.com/oauth2callback"]
+ }
+}`)
+
+func TestConfigFromJSON(t *testing.T) {
+ conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
+ if err != nil {
+ t.Error(err)
+ }
+ if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
+ t.Errorf("ClientID = %q; want %q", got, want)
+ }
+ if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
+ t.Errorf("ClientSecret = %q; want %q", got, want)
+ }
+ if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
+ t.Errorf("RedirectURL = %q; want %q", got, want)
+ }
+ if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+ t.Errorf("Scopes = %q; want %q", got, want)
+ }
+ if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
+ t.Errorf("AuthURL = %q; want %q", got, want)
+ }
+ if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
+ t.Errorf("TokenURL = %q; want %q", got, want)
+ }
+}
+
+func TestConfigFromJSON_Installed(t *testing.T) {
+ conf, err := ConfigFromJSON(installedJSONKey)
+ if err != nil {
+ t.Error(err)
+ }
+ if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want {
+ t.Errorf("ClientID = %q; want %q", got, want)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go
new file mode 100644
index 000000000..804d9d077
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "crypto/rsa"
+ "fmt"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
+// key file to read the credentials that authorize and authenticate the
+// requests, and returns a TokenSource that does not use any OAuth2 flow but
+// instead creates a JWT and sends that as the access token.
+// The audience is typically a URL that specifies the scope of the credentials.
+//
+// Note that this is not a standard OAuth flow, but rather an
+// optimization supported by a few Google services.
+// Unless you know otherwise, you should use JWTConfigFromJSON instead.
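None of the example files exercise JWTAccessTokenSourceFromJSON; a hedged sketch of how it would be wired up (the key path and audience URL are placeholders):

```go
package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	data, err := ioutil.ReadFile("/path/to/service-account-key.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// The audience names the service being called; no token endpoint is
	// contacted, the signed JWT itself travels as the bearer token.
	ts, err := google.JWTAccessTokenSourceFromJSON(data, "https://www.googleapis.com/")
	if err != nil {
		log.Fatal(err)
	}
	client := oauth2.NewClient(oauth2.NoContext, ts)
	client.Get("...")
}
```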
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer"}, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..01ba0ecb0 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,168 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
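ExampleSDKConfig earlier passes an empty account to mean "whatever gcloud has active"; a sketch of selecting a specific previously-authorized account instead (the address is a placeholder):

```go
package main

import (
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Placeholder account; it must already appear in the gcloud
	// credentials file, i.e. have gone through `gcloud auth login`.
	conf, err := google.NewSDKConfig("someone@example.com")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(oauth2.NoContext)
	client.Get("...")
}
```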
+func NewSDKConfig(account string) (*SDKConfig, error) {
+ configPath, err := sdkConfigPath()
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+ }
+ credentialsPath := filepath.Join(configPath, "credentials")
+ f, err := os.Open(credentialsPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+ }
+ defer f.Close()
+
+ var c sdkCredentials
+ if err := json.NewDecoder(f).Decode(&c); err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+ }
+ if len(c.Data) == 0 {
+ return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+ }
+ if account == "" {
+ propertiesPath := filepath.Join(configPath, "properties")
+ f, err := os.Open(propertiesPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+ }
+ defer f.Close()
+ ini, err := internal.ParseINI(f)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+ }
+ core, ok := ini["core"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+ }
+ active, ok := core["account"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+ }
+ account = active
+ }
+
+ for _, d := range c.Data {
+ if account == "" || d.Key.Account == account {
+ if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+ return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+ }
+ var expiry time.Time
+ if d.Credential.TokenExpiry != nil {
+ expiry = *d.Credential.TokenExpiry
+ }
+ return &SDKConfig{
+ conf: oauth2.Config{
+ ClientID: d.Credential.ClientID,
+ ClientSecret: d.Credential.ClientSecret,
+ Scopes: strings.Split(d.Key.Scope, " "),
+ Endpoint: Endpoint,
+ RedirectURL: "oob",
+ },
+ initialToken: &oauth2.Token{
+ AccessToken: d.Credential.AccessToken,
+ RefreshToken: d.Credential.RefreshToken,
+ Expiry: expiry,
+ },
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &oauth2.Transport{
+ Source: c.TokenSource(ctx),
+ },
+ }
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+ return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
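One consequence of the TokenSource contract above is worth a sketch: a refreshed access token lives only in the running process, while the gcloud credentials file keeps the old one:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	conf, err := google.NewSDKConfig("") // active gcloud account
	if err != nil {
		log.Fatal(err)
	}
	ts := conf.TokenSource(oauth2.NoContext)
	tok, err := ts.Token() // refreshed via Endpoint if the stored token expired
	if err != nil {
		log.Fatal(err)
	}
	// The refreshed token exists only in this process; the gcloud
	// credentials file on disk still holds the old one.
	fmt.Println(tok.Expiry)
}
```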
+var sdkConfigPath = func() (string, error) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+ }
+ homeDir := guessUnixHomeDir()
+ if homeDir == "" {
+ return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+ }
+ return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+ usr, err := user.Current()
+ if err == nil {
+ return usr.HomeDir
+ }
+ return os.Getenv("HOME")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
new file mode 100644
index 000000000..79df88964
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import "testing"
+
+func TestSDKConfig(t *testing.T) {
+ sdkConfigPath = func() (string, error) {
+ return "testdata/gcloud", nil
+ }
+
+ tests := []struct {
+ account string
+ accessToken string
+ err bool
+ }{
+ {"", "bar_access_token", false},
+ {"foo@example.com", "foo_access_token", false},
+ {"bar@example.com", "bar_access_token", false},
+ {"baz@serviceaccount.example.com", "", true},
+ }
+ for _, tt := range tests {
+ c, err := NewSDKConfig(tt.account)
+ if got, want := err != nil, tt.err; got != want {
+ if !tt.err {
+ t.Errorf("expected no error, got error: %v", err)
+ } else {
+ t.Errorf("expected error, got none")
+ }
+ continue
+ }
+ if err != nil {
+ continue
+ }
+ tok := c.initialToken
+ if tok == nil {
+ t.Errorf("expected token %q, got: nil", tt.accessToken)
+ continue
+ }
+ if tok.AccessToken != tt.accessToken {
+ t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
new file mode 100644
index 000000000..ff5eefbd0
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
@@ -0,0 +1,122 @@
+{
+ "data": [
+ {
+ "credential": {
+ "_class": "OAuth2Credentials",
+ "_module": "oauth2client.client",
+ "access_token": "foo_access_token",
+ "client_id": "foo_client_id",
+ "client_secret": "foo_client_secret",
+ "id_token": {
+ "at_hash": "foo_at_hash",
+ "aud": "foo_aud",
+ "azp": "foo_azp",
+ "cid": "foo_cid",
+ "email": "foo@example.com",
+ "email_verified": true,
+ "exp": 1420573614,
+ "iat": 1420569714,
+ "id": "1337",
+ "iss": "accounts.google.com",
+ "sub": "1337",
+ "token_hash": "foo_token_hash",
+ "verified_email": true
+ },
+ "invalid": false,
+ "refresh_token": "foo_refresh_token",
+ "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "token_expiry": "2015-01-09T00:51:51Z",
+ "token_response": {
+ "access_token": "foo_access_token",
+ "expires_in": 3600,
+ "id_token": "foo_id_token",
+ "token_type": "Bearer"
+ },
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "user_agent": "Cloud SDK Command Line Tool"
+ },
+ "key": {
+ "account": "foo@example.com",
+ "clientId": "foo_client_id",
+ "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email
https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "OAuth2Credentials", + "_module": "oauth2client.client", + "access_token": "bar_access_token", + "client_id": "bar_client_id", + "client_secret": "bar_client_secret", + "id_token": { + "at_hash": "bar_at_hash", + "aud": "bar_aud", + "azp": "bar_azp", + "cid": "bar_cid", + "email": "bar@example.com", + "email_verified": true, + "exp": 1420573614, + "iat": 1420569714, + "id": "1337", + "iss": "accounts.google.com", + "sub": "1337", + "token_hash": "bar_token_hash", + "verified_email": true + }, + "invalid": false, + "refresh_token": "bar_refresh_token", + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "token_expiry": "2015-01-09T00:51:51Z", + "token_response": { + "access_token": "bar_access_token", + "expires_in": 3600, + "id_token": "bar_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "bar@example.com", + "clientId": "bar_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "ServiceAccountCredentials", + "_kwargs": {}, + "_module": "oauth2client.client", + "_private_key_id": "00000000000000000000000000000000", + "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n", + "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "_service_account_email": "baz@serviceaccount.example.com", + "_service_account_id": 
"baz.serviceaccount.example.com", + "_token_uri": "https://accounts.google.com/o/oauth2/token", + "_user_agent": "Cloud SDK Command Line Tool", + "access_token": null, + "assertion_type": null, + "client_id": null, + "client_secret": null, + "id_token": null, + "invalid": false, + "refresh_token": null, + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "service_account_name": "baz@serviceaccount.example.com", + "token_expiry": null, + "token_response": null, + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "baz@serviceaccount.example.com", + "clientId": "baz_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + } + ], + "file_version": 1 +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties new file mode 100644 index 000000000..025de886c --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties @@ -0,0 +1,2 @@ +[core] +account = bar@example.com \ No newline at end of file diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 000000000..dc8ebfc4f --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. 
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
+
+func CondVal(v string) []string {
+ if v == "" {
+ return nil
+ }
+ return []string{v}
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
new file mode 100644
index 000000000..014a351e0
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestParseINI(t *testing.T) {
+ tests := []struct {
+ ini string
+ want map[string]map[string]string
+ }{
+ {
+ `root = toor
+[foo]
+bar = hop
+ini = nin
+`,
+ map[string]map[string]string{
+ "": map[string]string{"root": "toor"},
+ "foo": map[string]string{"bar": "hop", "ini": "nin"},
+ },
+ },
+ {
+ `[empty]
+[section]
+empty=
+`,
+ map[string]map[string]string{
+ "": map[string]string{},
+ "empty": map[string]string{},
+ "section": map[string]string{"empty": ""},
+ },
+ },
+ {
+ `ignore
+[invalid
+=stuff
+;comment=true
+`,
+ map[string]map[string]string{
+ "": map[string]string{},
+ },
+ },
+ }
+ for _, tt := range tests {
+ result, err := ParseINI(strings.NewReader(tt.ini))
+ if err != nil {
+ t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err)
+ continue
+ }
+ if !reflect.DeepEqual(result, tt.want) {
+ t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 000000000..a17d79dd9
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,214 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://www.googleapis.com/", + "https://github.com/", + "https://api.instagram.com/", + "https://www.douban.com/", + "https://api.dropbox.com/", + "https://api.soundcloud.com/", + "https://www.linkedin.com/", + "https://api.twitch.tv/", + "https://oauth.vk.com/", + "https://api.odnoklassniki.ru/", + "https://connect.stripe.com/", + "https://api.pushbullet.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://www.strava.com/oauth/", + "https://app.box.com/", + "https://test-sandbox.auth.corp.google.com", + "https://user.gini.net/", + "https://api.netatmo.net/", +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. 
+ return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go new file mode 100644 index 000000000..864f6fa07 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go @@ -0,0 +1,28 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
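RetrieveToken above is the workhorse behind Config.Exchange and token refreshes, with providerAuthHeaderWorks deciding whether the client secret is sent via Basic auth or duplicated into the form body. A test-style sketch of calling it directly from inside the internal package (the provider URL and credentials are placeholders, and the test is skipped so nothing is actually sent):

```go
package internal

import (
	"net/url"
	"testing"

	"golang.org/x/net/context"
)

func TestRetrieveTokenSketch(t *testing.T) {
	t.Skip("illustrative only: provider URL and credentials are placeholders")
	v := url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {"REFRESH_TOKEN"},
	}
	tok, err := RetrieveToken(context.Background(), "CLIENT_ID", "CLIENT_SECRET",
		"https://provider.example.com/o/oauth2/token", v)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("access token %q expires %v", tok.AccessToken, tok.Expiry)
}
```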
+package internal + +import ( + "fmt" + "testing" +) + +func Test_providerAuthHeaderWorks(t *testing.T) { + for _, p := range brokenAuthHeaderProviders { + if providerAuthHeaderWorks(p) { + t.Errorf("URL: %s not found in list", p) + } + p := fmt.Sprintf("%ssomesuffix", p) + if providerAuthHeaderWorks(p) { + t.Errorf("URL: %s not found in list", p) + } + } + p := "https://api.not-in-the-list-example.com/" + if !providerAuthHeaderWorks(p) { + t.Errorf("URL: %s found in list", p) + } + +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 000000000..521e7b49e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,67 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 000000000..994ae1e88 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,160 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides encoding and decoding utilities for +// signed JWS messages. 
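The ContextClient chain in transport.go above prefers, in order: a registered ContextClientFunc, a client stored under the HTTPClient key, then http.DefaultClient. A small runnable check of the middle case (again test-style, inside the internal package):

```go
package internal

import (
	"net/http"
	"testing"
	"time"

	"golang.org/x/net/context"
)

func TestContextClientSketch(t *testing.T) {
	custom := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), HTTPClient, custom)
	hc, err := ContextClient(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if hc != custom {
		t.Error("expected the client stored on the context to win")
	}
}
```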
+package jws
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+ Iss string `json:"iss"` // email address of the client_id of the application making the access token request
+ Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+ Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
+ Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
+ Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
+ Typ string `json:"typ,omitempty"` // token type (Optional).
+
+ // Email for which the application is requesting delegated access (Optional).
+ Sub string `json:"sub,omitempty"`
+
+ // The old name of Sub. Client keeps setting Prn to be
+ // compliant with legacy OAuth 2.0 providers. (Optional)
+ Prn string `json:"prn,omitempty"`
+
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+ // This map is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"`
+
+ exp time.Time
+ iat time.Time
+}
+
+func (c *ClaimSet) encode() (string, error) {
+ if c.exp.IsZero() || c.iat.IsZero() {
+ // Reverting time back for machines whose time is not perfectly in sync.
+ // If client machine's time is in the future according
+ // to Google servers, an access token will not be issued.
+ now := time.Now().Add(-10 * time.Second)
+ c.iat = now
+ c.exp = now.Add(time.Hour)
+ }
+
+ c.Exp = c.exp.Unix()
+ c.Iat = c.iat.Unix()
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.PrivateClaims) == 0 {
+ return base64Encode(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.PrivateClaims)
+ if err != nil {
+ return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+ // The algorithm used for signature.
+ Algorithm string `json:"alg"`
+
+ // Represents the token type.
+ Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+ // decode returned id token to get expiry
+ s := strings.Split(payload, ".")
+ if len(s) < 2 {
+ // TODO(jbd): Provide more context about the error.
+ return nil, errors.New("jws: invalid token received") + } + decoded, err := base64Decode(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Encode encodes a signed JWS with provided header and claim set. +func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + h := sha256.New() + h.Write([]byte(ss)) + b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil)) + if err != nil { + return "", err + } + sig := base64Encode(b) + return fmt.Sprintf("%s.%s", ss, sig), nil +} + +// base64Encode returns and Base64url encoded version of the input string with any +// trailing "=" stripped. +func base64Encode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// base64Decode decodes the Base64url encoded string +func base64Decode(s string) ([]byte, error) { + // add back missing padding + switch len(s) % 4 { + case 2: + s += "==" + case 3: + s += "=" + } + return base64.URLEncoding.DecodeString(s) +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go new file mode 100644 index 000000000..6d618836e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go @@ -0,0 +1,31 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jwt_test + +import ( + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +func ExampleJWTConfig() { + conf := &jwt.Config{ + Email: "xxx@developer.com", + // The contents of your RSA private key or your PEM file + // that contains a private key. + // If you have a p12 file instead, you + // can use `openssl` to export the private key into a pem file. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + // It only supports PEM containers with no passphrase. + PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), + Subject: "user@example.com", + TokenURL: "https://provider.com/o/oauth2/token", + } + // Initiate an http.Client, the following GET request will be + // authorized and authenticated on the behalf of user@example.com. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..205d23ed4 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,147 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". 
+// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + payload, err := jws.Encode(defaultHeader, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. 
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go new file mode 100644 index 000000000..da922c3d0 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jwt + +import ( + "net/http" + "net/http/httptest" + "testing" + + "golang.org/x/oauth2" +) + +var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg +KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 +OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd +mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ +5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== +-----END RSA PRIVATE KEY-----`) + +func TestJWTFetch_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", + "scope": "user", + 
"token_type": "bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(oauth2.NoContext).Token() + if err != nil { + t.Fatal(err) + } + if !tok.Valid() { + t.Errorf("Token invalid") + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v", tok.TokenType) + } + if tok.Expiry.IsZero() { + t.Errorf("Unexpected token expiry, %#v", tok.Expiry) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } +} + +func TestJWTFetch_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(oauth2.NoContext).Token() + if err != nil { + t.Fatal(err) + } + if tok == nil { + t.Fatalf("token is nil") + } + if tok.Valid() { + t.Errorf("token is valid. want invalid.") + } + if tok.AccessToken != "" { + t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken) + } + if want := "bearer"; tok.TokenType != want { + t.Errorf("TokenType = %q; want %q", tok.TokenType, want) + } + scope := tok.Extra("scope") + if want := "user"; scope != want { + t.Errorf("token scope = %q; want %q", scope, want) + } +} + +func TestJWTFetch_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(oauth2.NoContext).Token() + if err == nil { + t.Error("got a token; expected error") + if tok.AccessToken != "" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go new file mode 100644 index 000000000..d93fded6a --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linkedin provides constants for using OAuth2 to access LinkedIn. +package linkedin + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is LinkedIn's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", + TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go new file mode 100644 index 000000000..dfcf238d2 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,325 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2
+
+import (
+	"bytes"
+	"errors"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+var NoContext = context.TODO()
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// Endpoint contains the resource server's token endpoint
+	// URLs. These are constants specific to each server and are
+	// often available via site-specific packages, such as
+	// google.Endpoint or github.Endpoint.
+	Endpoint Endpoint
+
+	// RedirectURL is the URL to which the user is redirected
+	// after completing the OAuth flow at the provider.
+	RedirectURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Config.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to the OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
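+//
+// For example (sketch; the state value here is hypothetical), generate
+// the URL with a per-session token and compare it again on the callback:
+//
+//	url := conf.AuthCodeURL("nonce-1234", oauth2.AccessTypeOffline)
+//	// later, in the redirect handler:
+//	// if r.FormValue("state") != "nonce-1234" { /* reject */ }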
+// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. 
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+	if tf.refreshToken == "" {
+		return nil, errors.New("oauth2: token expired and refresh token is not set")
+	}
+
+	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+		"grant_type":    {"refresh_token"},
+		"refresh_token": {tf.refreshToken},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	if tf.refreshToken != tk.RefreshToken {
+		tf.refreshToken = tk.RefreshToken
+	}
+	return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
+
+// Token returns the current token if it's still valid, else fetches a
+// new token from s.new, caches it, and returns it.
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+	return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+	t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+	return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		c, err := internal.ContextClient(ctx)
+		if err != nil {
+			return &http.Client{Transport: internal.ErrorTransport{err}}
+		}
+		return c
+	}
+	return &http.Client{
+		Transport: &Transport{
+			Base:   internal.ContextTransport(ctx),
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
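+	// For example, ReuseTokenSource(t, ReuseTokenSource(nil, src)) should
+	// behave exactly like ReuseTokenSource(t, src), minus one wrapper.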
+ if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go new file mode 100644 index 000000000..2f7d731c1 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go @@ -0,0 +1,422 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "strconv" + "testing" + "time" + + "golang.org/x/net/context" +) + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +type mockCache struct { + token *Token + readErr error +} + +func (c *mockCache) ReadToken() (*Token, error) { + return c.token, c.readErr +} + +func (c *mockCache) WriteToken(*Token) { + // do nothing +} + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + RedirectURL: "REDIRECT_URL", + Scopes: []string{"scope1", "scope2"}, + Endpoint: Endpoint{ + AuthURL: url + "/auth", + TokenURL: url + "/token", + }, + } +} + +func TestAuthCodeURL(t *testing.T) { + conf := newConf("server") + url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce) + if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" { + t.Errorf("Auth code URL doesn't match the expected, found: %v", url) + } +} + +func TestAuthCodeURL_CustomParam(t *testing.T) { + conf := newConf("server") + param := SetAuthURLParam("foo", "bar") + url := conf.AuthCodeURL("baz", param) + if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" { + t.Errorf("Auth code URL doesn't match the expected, found: %v", url) + } +} + +func TestAuthCodeURL_Optional(t *testing.T) { + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "/auth-url", + TokenURL: "/token-url", + }, + } + url := conf.AuthCodeURL("") + if url != "/auth-url?client_id=CLIENT_ID&response_type=code" { + t.Fatalf("Auth code URL doesn't match the expected, found: %v", url) + } +} + +func TestExchangeRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + 
w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(NoContext, "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } +} + +func TestExchangeRequest_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(NoContext, "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok)
+	}
+	if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+	}
+	if tok.TokenType != "bearer" {
+		t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+	}
+	scope := tok.Extra("scope")
+	if scope != "user" {
+		t.Errorf("Unexpected value for scope: %v", scope)
+	}
+}
+
+const day = 24 * time.Hour
+
+func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) {
+	seconds := int32(day.Seconds())
+	jsonNumberType := reflect.TypeOf(json.Number("0"))
+	for _, c := range []struct {
+		expires string
+		expect  error
+	}{
+		{fmt.Sprintf(`"expires_in": %d`, seconds), nil},
+		{fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case
+		{fmt.Sprintf(`"expires": %d`, seconds), nil},      // Facebook case
+		{`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type
+		{`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}},  // wrong type
+		{`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value
+	} {
+		testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect)
+	}
+}
+
+func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp)))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	t1 := time.Now().Add(day)
+	tok, err := conf.Exchange(NoContext, "exchange-code")
+	t2 := time.Now().Add(day)
+	// Do a fmt.Sprint comparison so either side can be
+	// nil. fmt.Sprint just stringifies them to "<nil>", and no
+	// non-nil expected error ever stringifies as "<nil>", so this
+	// isn't terribly disgusting. We do this because Go 1.4 and
+	// Go 1.5 return a different deep value for
+	// json.UnmarshalTypeError. In Go 1.5, the
+	// json.UnmarshalTypeError contains a new field with a new
+	// non-zero value. Rather than ignore it here with reflect or
+	// add new files and +build tags, just look at the strings.
+	if fmt.Sprint(err) != fmt.Sprint(expect) {
+		t.Errorf("Error = %v; want %v", err, expect)
+	}
+	if err != nil {
+		return
+	}
+	if !tok.Valid() {
+		t.Fatalf("Token invalid. 
Got: %#v", tok)
+	}
+	expiry := tok.Expiry
+	if expiry.Before(t1) || expiry.After(t2) {
+		t.Errorf("Unexpected value for Expiry: %v (should be between %v and %v)", expiry, t1, t2)
+	}
+}
+
+func TestExchangeRequest_BadResponse(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tok, err := conf.Exchange(NoContext, "code")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tok.AccessToken != "" {
+		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+	}
+}
+
+func TestExchangeRequest_BadResponseType(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	_, err := conf.Exchange(NoContext, "exchange-code")
+	if err == nil {
+		t.Error("expected error from invalid access_token type")
+	}
+}
+
+func TestExchangeRequest_NonBasicAuth(t *testing.T) {
+	tr := &mockTransport{
+		rt: func(r *http.Request) (w *http.Response, err error) {
+			headerAuth := r.Header.Get("Authorization")
+			if headerAuth != "" {
+				t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+			}
+			return nil, errors.New("no response")
+		},
+	}
+	c := &http.Client{Transport: tr}
+	conf := &Config{
+		ClientID: "CLIENT_ID",
+		Endpoint: Endpoint{
+			AuthURL:  "https://accounts.google.com/auth",
+			TokenURL: "https://accounts.google.com/token",
+		},
+	}
+
+	ctx := context.WithValue(context.Background(), HTTPClient, c)
+	conf.Exchange(ctx, "code")
+}
+
+func TestPasswordCredentialsTokenRequest(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+		expected := "/token"
+		if r.URL.String() != expected {
+			t.Errorf("URL = %q; want %q", r.URL, expected)
+		}
+		headerAuth := r.Header.Get("Authorization")
+		expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
+		if headerAuth != expected {
+			t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
+		}
+		headerContentType := r.Header.Get("Content-Type")
+		expected = "application/x-www-form-urlencoded"
+		if headerContentType != expected {
+			t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
+		}
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Errorf("Failed reading request body: %s.", err)
+		}
+		expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1"
+		if string(body) != expected {
+			t.Errorf("res.Body = %q; want %q", string(body), expected)
+		}
+		w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+		w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1")
+	if err != nil {
+		t.Error(err)
+	}
+	if !tok.Valid() {
+		t.Fatalf("Token invalid. 
Got: %#v", tok) + } + expected := "90d64460d14870c08c81352a05dedd3465940a7c" + if tok.AccessToken != expected { + t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) + } + expected = "bearer" + if tok.TokenType != expected { + t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"}) + c.Get(ts.URL + "/somethingelse") +} + +func TestFetchWithNoRefreshToken(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(NoContext, nil) + _, err := c.Get(ts.URL + "/somethingelse") + if err == nil { + t.Errorf("Fetch should return an error if no refresh token is set") + } +} + +func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + tkr := tokenRefresher{ + conf: conf, + ctx: NoContext, + refreshToken: "OLD REFRESH TOKEN", + } + tk, err := tkr.Token() + if err != nil { + t.Errorf("Unexpected refreshToken error returned: %v", err) + return + } + if tk.RefreshToken != tkr.refreshToken { + t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken) + } +} + +func TestConfigClientWithToken(t *testing.T) { + tok := &Token{ + AccessToken: "abc123", + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + return + })) + defer ts.Close() + conf := newConf(ts.URL) + + c := conf.Client(NoContext, tok) + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Error(err) + } + _, err = c.Do(req) + if err != nil { + t.Error(err) + } +} diff --git 
a/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
new file mode 100644
index 000000000..f0b66f97d
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.
+package odnoklassniki
+
+import (
+	"golang.org/x/oauth2"
+)
+
+// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.odnoklassniki.ru/oauth/authorize",
+	TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
new file mode 100644
index 000000000..a99366b6e
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package paypal provides constants for using OAuth2 to access PayPal.
+package paypal
+
+import (
+	"golang.org/x/oauth2"
+)
+
+// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+	TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
+}
+
+// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment.
+var SandboxEndpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+	TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
new file mode 100644
index 000000000..ebbdddbdc
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
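+	//
+	// Some providers return a rotated refresh token alongside a new
+	// access token; tokenRefresher (in oauth2.go) keeps whichever
+	// value the server most recently sent.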
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if strings.EqualFold(t.TokenType, "bearer") {
+		return "Bearer"
+	}
+	if strings.EqualFold(t.TokenType, "mac") {
+		return "MAC"
+	}
+	if strings.EqualFold(t.TokenType, "basic") {
+		return "Basic"
+	}
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+	if vals, ok := t.raw.(url.Values); ok {
+		// TODO(jbd): Cast numeric values to int64 or float64.
+		return vals.Get(key)
+	}
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+	return nil
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+	return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
new file mode 100644
index 000000000..739eeb2a2
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
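+
+// Note on expiryDelta (defined in token.go): a token whose Expiry is,
+// say, 5 seconds away is already reported as expired, because expired()
+// subtracts the 10-second delta before comparing against time.Now();
+// the TestTokenExpiry cases below exercise exactly that boundary.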
+
+package oauth2
+
+import (
+	"testing"
+	"time"
+)
+
+func TestTokenExtra(t *testing.T) {
+	type testCase struct {
+		key  string
+		val  interface{}
+		want interface{}
+	}
+	const key = "extra-key"
+	cases := []testCase{
+		{key: key, val: "abc", want: "abc"},
+		{key: key, val: 123, want: 123},
+		{key: key, val: "", want: ""},
+		{key: "other-key", val: "def", want: nil},
+	}
+	for _, tc := range cases {
+		extra := make(map[string]interface{})
+		extra[tc.key] = tc.val
+		tok := &Token{raw: extra}
+		if got, want := tok.Extra(key), tc.want; got != want {
+			t.Errorf("Extra(%q) = %q; want %q", key, got, want)
+		}
+	}
+}
+
+func TestTokenExpiry(t *testing.T) {
+	now := time.Now()
+	cases := []struct {
+		name string
+		tok  *Token
+		want bool
+	}{
+		{name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false},
+		{name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true},
+	}
+	for _, tc := range cases {
+		if got, want := tc.tok.expired(), tc.want; got != want {
+			t.Errorf("expired (%q) = %v; want %v", tc.name, got, want)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
new file mode 100644
index 000000000..90db08833
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or the token is expired,
+// it tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
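+// It forwards the cancellation to the base transport's own CancelRequest
+// (looking up the modified clone of req that RoundTrip actually sent);
+// if the base transport has no CancelRequest method, this is a no-op.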
+func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go new file mode 100644 index 000000000..35cb25ed5 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go @@ -0,0 +1,86 @@ +package oauth2 + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +type tokenSource struct{ token *Token } + +func (t *tokenSource) Token() (*Token, error) { + return t.token, nil +} + +func TestTransportTokenSource(t *testing.T) { + ts := &tokenSource{ + token: &Token{ + AccessToken: "abc", + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Authorization") != "Bearer abc" { + t.Errorf("Transport doesn't set the Authorization header from the fetched token") + } + }) + defer server.Close() + client := http.Client{Transport: tr} + client.Get(server.URL) +} + +// Test for case-sensitive token types, per https://github.com/golang/oauth2/issues/113 +func TestTransportTokenSourceTypes(t *testing.T) { + const val = "abc" + tests := []struct { + key string + val string + want string + }{ + {key: "bearer", val: val, want: "Bearer abc"}, + {key: "mac", val: val, want: "MAC abc"}, + {key: "basic", val: val, want: "Basic abc"}, + } + for _, tc := range tests { + ts := &tokenSource{ + token: &Token{ + AccessToken: tc.val, + TokenType: tc.key, + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), tc.want; got != want { + t.Errorf("Authorization header (%q) = %q; want %q", val, got, want) + } + }) + defer server.Close() + client := http.Client{Transport: tr} + client.Get(server.URL) + } +} + +func TestTokenValidNoAccessToken(t *testing.T) { + token := &Token{} + if token.Valid() { + t.Errorf("Token should not be valid with no access token") + } +} + +func TestExpiredWithExpiry(t *testing.T) { + token := &Token{ + Expiry: time.Now().Add(-5 * time.Hour), + } + if 
token.Valid() { + t.Errorf("Token should not be valid if it expired in the past") + } +} + +func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(handler)) +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go new file mode 100644 index 000000000..00e929357 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vk provides constants for using OAuth2 to access VK.com. +package vk + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is VK's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.vk.com/authorize", + TokenURL: "https://oauth.vk.com/access_token", +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 000000000..ffc0ffbca --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,566 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. +package googleapi + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi/internal/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. +type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +const ( + Version = "0.5" + + // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete. + statusResumeIncomplete = 308 + + // UserAgent is the header string used to identify this package. + UserAgent = "google-api-go-client/" + Version + + // uploadPause determines the delay between failed upload attempts + uploadPause = 1 * time.Second +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + + Errors []ErrorItem +} + +// ErrorItem is a detailed error code & message from the Google API frontend. +type ErrorItem struct { + // Reason is the typed error code. For example: "some_example". + Reason string `json:"reason"` + // Message is the human-readable description of the error. 
+	Message string `json:"message"`
+}
+
+func (e *Error) Error() string {
+	if len(e.Errors) == 0 && e.Message == "" {
+		return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
+	}
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
+	if e.Message != "" {
+		fmt.Fprintf(&buf, "%s", e.Message)
+	}
+	if len(e.Errors) == 0 {
+		return strings.TrimSpace(buf.String())
+	}
+	if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
+		fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
+		return buf.String()
+	}
+	fmt.Fprintln(&buf, "\nMore details:")
+	for _, v := range e.Errors {
+		fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
+	}
+	return buf.String()
+}
+
+type errorReply struct {
+	Error *Error `json:"error"`
+}
+
+// CheckResponse returns an error (of type *Error) if the response
+// status code is not 2xx.
+func CheckResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err == nil {
+		jerr := new(errorReply)
+		err = json.Unmarshal(slurp, jerr)
+		if err == nil && jerr.Error != nil {
+			if jerr.Error.Code == 0 {
+				jerr.Error.Code = res.StatusCode
+			}
+			jerr.Error.Body = string(slurp)
+			return jerr.Error
+		}
+	}
+	return &Error{
+		Code: res.StatusCode,
+		Body: string(slurp),
+	}
+}
+
+// CheckMediaResponse returns an error (of type *Error) if the response
+// status code is not 2xx. Unlike CheckResponse it does not assume the
+// body is a JSON error document.
+func CheckMediaResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
+	res.Body.Close()
+	return &Error{
+		Code: res.StatusCode,
+		Body: string(slurp),
+	}
+}
+
+type MarshalStyle bool
+
+var WithDataWrapper = MarshalStyle(true)
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+	buf := new(bytes.Buffer)
+	if wrap {
+		buf.Write([]byte(`{"data": `))
+	}
+	err := json.NewEncoder(buf).Encode(v)
+	if err != nil {
+		return nil, err
+	}
+	if wrap {
+		buf.Write([]byte(`}`))
+	}
+	return buf, nil
+}
+
+func getMediaType(media io.Reader) (io.Reader, string) {
+	if typer, ok := media.(ContentTyper); ok {
+		return media, typer.ContentType()
+	}
+
+	pr, pw := io.Pipe()
+	typ := "application/octet-stream"
+	buf, err := ioutil.ReadAll(io.LimitReader(media, 512))
+	if err != nil {
+		pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
+		return pr, typ
+	}
+	typ = http.DetectContentType(buf)
+	mr := io.MultiReader(bytes.NewReader(buf), media)
+	go func() {
+		_, err = io.Copy(pw, mr)
+		if err != nil {
+			pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
+			return
+		}
+		pw.Close()
+	}()
+	return pr, typ
+}
+
+// DetectMediaType detects and returns the content type of the provided media.
+// If the type can not be determined, "application/octet-stream" is returned.
+func DetectMediaType(media io.ReaderAt) string {
+	if typer, ok := media.(ContentTyper); ok {
+		return typer.ContentType()
+	}
+
+	typ := "application/octet-stream"
+	buf := make([]byte, 1024)
+	n, err := media.ReadAt(buf, 0)
+	buf = buf[:n]
+	if err == nil || err == io.EOF {
+		typ = http.DetectContentType(buf)
+	}
+	return typ
+}
+
+type Lengther interface {
+	Len() int
+}
+
+// endingWithErrorReader reads from r until it returns an error. If the
+// final error from r is io.EOF and e is non-nil, e is used instead.
+type endingWithErrorReader struct {
+	r io.Reader
+	e error
+}
+
+func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
+	n, err = er.r.Read(p)
+	if err == io.EOF && er.e != nil {
+		err = er.e
+	}
+	return
+}
+
+func typeHeader(contentType string) textproto.MIMEHeader {
+	h := make(textproto.MIMEHeader)
+	h.Set("Content-Type", contentType)
+	return h
+}
+
+// countingWriter counts the number of bytes it receives to write, but
+// discards them.
+type countingWriter struct {
+	n *int64
+}
+
+func (w countingWriter) Write(p []byte) (int, error) {
+	*w.n += int64(len(p))
+	return len(p), nil
+}
+
+// ConditionallyIncludeMedia does nothing if media is nil.
+//
+// bodyp is an in/out parameter. It should initially point to the
+// reader of the application/json (or whatever) payload to send in the
+// API request. It's updated to point to the multipart body reader.
+//
+// ctypep is an in/out parameter. It should initially point to the
+// content type of the bodyp, usually "application/json". It's updated
+// to the "multipart/related" content type, with random boundary.
+//
+// The return value is the content-length of the entire multipart body.
+func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) {
+	if media == nil {
+		return
+	}
+	// Get the media type, which might return a different reader instance.
+	var mediaType string
+	media, mediaType = getMediaType(media)
+
+	body, bodyType := *bodyp, *ctypep
+
+	pr, pw := io.Pipe()
+	mpw := multipart.NewWriter(pw)
+	*bodyp = pr
+	*ctypep = "multipart/related; boundary=" + mpw.Boundary()
+	go func() {
+		w, err := mpw.CreatePart(typeHeader(bodyType))
+		if err != nil {
+			mpw.Close()
+			pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err))
+			return
+		}
+		_, err = io.Copy(w, body)
+		if err != nil {
+			mpw.Close()
+			pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err))
+			return
+		}
+
+		w, err = mpw.CreatePart(typeHeader(mediaType))
+		if err != nil {
+			mpw.Close()
+			pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err))
+			return
+		}
+		_, err = io.Copy(w, media)
+		if err != nil {
+			mpw.Close()
+			pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err))
+			return
+		}
+		mpw.Close()
+		pw.Close()
+	}()
+	cancel = func() { pw.CloseWithError(errAborted) }
+	return cancel, true
+}
+
+var errAborted = errors.New("googleapi: upload aborted")
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+// ResumableUpload is used by the generated APIs to provide resumable uploads.
+// It is not used by developers directly.
+type ResumableUpload struct {
+	Client *http.Client
+	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
+	URI       string
+	UserAgent string // User-Agent for header of the request
+	// Media is the object being uploaded.
+	Media io.ReaderAt
+	// MediaType defines the media type, e.g. "image/jpeg".
+	MediaType string
+	// ContentLength is the full size of the object being uploaded.
+ ContentLength int64 + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + started bool // whether the upload has been started + + // Callback is an optional function that will be called upon every progress update. + Callback ProgressUpdater +} + +var ( + // rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded. + rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`) + // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. + // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. + chunkSize int64 = 1 << 18 +) + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +func (rx *ResumableUpload) transferStatus() (int64, *http.Response, error) { + req, _ := http.NewRequest("POST", rx.URI, nil) + req.ContentLength = 0 + req.Header.Set("User-Agent", rx.UserAgent) + req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength)) + res, err := rx.Client.Do(req) + if err != nil || res.StatusCode != statusResumeIncomplete { + return 0, res, err + } + var start int64 + if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 { + start, err = strconv.ParseInt(m[1], 10, 64) + if err != nil { + return 0, nil, fmt.Errorf("unable to parse range size %v", m[1]) + } + start += 1 // Start at the next byte + } + return start, res, nil +} + +type chunk struct { + body io.Reader + size int64 + err error +} + +func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) { + var start int64 + var err error + res := &http.Response{} + if rx.started { + start, res, err = rx.transferStatus() + if err != nil || res.StatusCode != statusResumeIncomplete { + return res, err + } + } + rx.started = true + + for { + select { // Check for cancellation + case <-ctx.Done(): + res.StatusCode = http.StatusRequestTimeout + return res, ctx.Err() + default: + } + reqSize := rx.ContentLength - start + if reqSize > chunkSize { + reqSize = chunkSize + } + r := io.NewSectionReader(rx.Media, start, reqSize) + req, _ := http.NewRequest("POST", rx.URI, r) + req.ContentLength = reqSize + req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength)) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + res, err = rx.Client.Do(req) + start += reqSize + if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) { + rx.mu.Lock() + rx.progress = start // keep track of number of bytes sent so far + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(start, rx.ContentLength) + } + } + if err != nil || res.StatusCode != statusResumeIncomplete { + break + } + } + return res, err +} + +var sleep = time.Sleep // override in unit tests + +// Upload starts the process of a resumable upload with a cancellable context. +// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled. +// It is called from the auto-generated API code and is not visible to the user. +// rx is private to the auto-generated API code. 
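+//
+// Illustrative wire exchange for a single chunk (sizes hypothetical;
+// chunkSize is 1<<18 in this file):
+//
+//	POST <rx.URI>
+//	Content-Range: bytes 0-262143/1048576
+//
+// The server answers 308 (statusResumeIncomplete) and a header such as
+// "Range: bytes=0-262143" until the last chunk completes the upload.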
+func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
+	var res *http.Response
+	var err error
+	for {
+		res, err = rx.transferChunks(ctx)
+		if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
+			return res, err
+		}
+		select { // Check for cancellation
+		case <-ctx.Done():
+			res.StatusCode = http.StatusRequestTimeout
+			return res, ctx.Err()
+		default:
+		}
+		sleep(uploadPause)
+	}
+	return res, err
+}
+
+func ResolveRelative(basestr, relstr string) string {
+	u, _ := url.Parse(basestr)
+	rel, _ := url.Parse(relstr)
+	u = u.ResolveReference(rel)
+	us := u.String()
+	us = strings.Replace(us, "%7B", "{", -1)
+	us = strings.Replace(us, "%7D", "}", -1)
+	return us
+}
+
+// has4860Fix is whether this Go environment contains the fix for
+// http://golang.org/issue/4860
+var has4860Fix bool
+
+// init initializes has4860Fix by checking the behavior of the net/http package.
+func init() {
+	r := http.Request{
+		URL: &url.URL{
+			Scheme: "http",
+			Opaque: "//opaque",
+		},
+	}
+	b := &bytes.Buffer{}
+	r.Write(b)
+	has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
+}
+
+// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
+// don't alter any hex-escaped characters in u.Path.
+func SetOpaque(u *url.URL) {
+	u.Opaque = "//" + u.Host + u.Path
+	if !has4860Fix {
+		u.Opaque = u.Scheme + ":" + u.Opaque
+	}
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// This calls SetOpaque to avoid encoding of the parameters in the URL path.
+func Expand(u *url.URL, expansions map[string]string) {
+	expanded, err := uritemplates.Expand(u.Path, expansions)
+	if err == nil {
+		u.Path = expanded
+		SetOpaque(u)
+	}
+}
+
+// CloseBody is used to close res.Body.
+// Prior to calling Close, it also tries to Read a small amount to see an EOF.
+// Not seeing an EOF can prevent HTTP Transports from reusing connections.
+func CloseBody(res *http.Response) {
+	if res == nil || res.Body == nil {
+		return
+	}
+	// Justification for 3 byte reads: two for up to "\r\n" after
+	// a JSON/XML document, and then 1 to see EOF if we haven't yet.
+	// TODO(bradfitz): detect Go 1.3+ and skip these reads.
+	// See https://codereview.appspot.com/58240043
+	// and https://codereview.appspot.com/49570044
+	buf := make([]byte, 1)
+	for i := 0; i < 3; i++ {
+		_, err := res.Body.Read(buf)
+		if err != nil {
+			break
+		}
+	}
+	res.Body.Close()
+}
+
+// VariantType returns the type name of the given variant.
+// If the map doesn't contain the named key or the value is not a string, "" is returned.
+// This is used to support "variant" APIs that can return one of a number of different types.
+func VariantType(t map[string]interface{}) string {
+	s, _ := t["type"].(string)
+	return s
+}
+
+// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
+// This is used to support "variant" APIs that can return one of a number of different types.
+// It reports whether the conversion was successful.
+func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
+	var buf bytes.Buffer
+	err := json.NewEncoder(&buf).Encode(v)
+	if err != nil {
+		return false
+	}
+	return json.Unmarshal(buf.Bytes(), dst) == nil
+}
+
+// A Field names a field to be retrieved with a partial response.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request. +// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. +// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// More information about field formatting can be found here: +// https://developers.google.com/+/api/#fields-syntax +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi_test.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi_test.go new file mode 100644 index 000000000..c4989f70a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi_test.go @@ -0,0 +1,598 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +type SetOpaqueTest struct { + in *url.URL + wantRequestURI string +} + +var setOpaqueTests = []SetOpaqueTest{ + // no path + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + }, + "http://www.golang.org", + }, + // path + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + Path: "/", + }, + "http://www.golang.org/", + }, + // file with hex escaping + { + &url.URL{ + Scheme: "https", + Host: "www.golang.org", + Path: "/file%20one&two", + }, + "https://www.golang.org/file%20one&two", + }, + // query + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + Path: "/", + RawQuery: "q=go+language", + }, + "http://www.golang.org/?q=go+language", + }, + // file with hex escaping in path plus query + { + &url.URL{ + Scheme: "https", + Host: "www.golang.org", + Path: "/file%20one&two", + RawQuery: "q=go+language", + }, + "https://www.golang.org/file%20one&two?q=go+language", + }, + // query with hex escaping + { + &url.URL{ + Scheme: "http", + Host: "www.golang.org", + Path: "/", + RawQuery: "q=go%20language", + }, + "http://www.golang.org/?q=go%20language", + }, +} + +// prefixTmpl is a template for the expected prefix of the output of writing +// an HTTP request. 
+const prefixTmpl = "GET %v HTTP/1.1\r\nHost: %v\r\n" + +func TestSetOpaque(t *testing.T) { + for _, test := range setOpaqueTests { + u := *test.in + SetOpaque(&u) + + w := &bytes.Buffer{} + r := &http.Request{URL: &u} + if err := r.Write(w); err != nil { + t.Errorf("write request: %v", err) + continue + } + + prefix := fmt.Sprintf(prefixTmpl, test.wantRequestURI, test.in.Host) + if got := string(w.Bytes()); !strings.HasPrefix(got, prefix) { + t.Errorf("got %q expected prefix %q", got, prefix) + } + } +} + +type ExpandTest struct { + in string + expansions map[string]string + want string +} + +var expandTests = []ExpandTest{ + // no expansions + { + "http://www.golang.org/", + map[string]string{}, + "http://www.golang.org/", + }, + // one expansion, no escaping + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red", + }, + "http://www.golang.org/red/delete", + }, + // one expansion, with hex escapes + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red/blue", + }, + "http://www.golang.org/red%2Fblue/delete", + }, + // one expansion, with space + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org/red%20or%20blue/delete", + }, + // expansion not found + { + "http://www.golang.org/{object}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org//delete", + }, + // multiple expansions + { + "http://www.golang.org/{one}/{two}/{three}/get", + map[string]string{ + "one": "ONE", + "two": "TWO", + "three": "THREE", + }, + "http://www.golang.org/ONE/TWO/THREE/get", + }, + // utf-8 characters + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": "£100", + }, + "http://www.golang.org/%C2%A3100/get", + }, + // punctuations + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": `/\@:,.`, + }, + "http://www.golang.org/%2F%5C%40%3A%2C./get", + }, + // mis-matched brackets + { + "http://www.golang.org/{bucket/get", + map[string]string{ + "bucket": "red", + }, + "http://www.golang.org/{bucket/get", + }, + // "+" prefix for suppressing escape + // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3 + { + "http://www.golang.org/{+topic}", + map[string]string{ + "topic": "/topics/myproject/mytopic", + }, + // The double slashes here look weird, but it's intentional + "http://www.golang.org//topics/myproject/mytopic", + }, +} + +func TestExpand(t *testing.T) { + for i, test := range expandTests { + u := url.URL{ + Path: test.in, + } + Expand(&u, test.expansions) + got := u.Path + if got != test.want { + t.Errorf("got %q expected %q in test %d", got, test.want, i+1) + } + } +} + +type CheckResponseTest struct { + in *http.Response + bodyText string + want error + errText string +} + +var checkResponseTests = []CheckResponseTest{ + { + &http.Response{ + StatusCode: http.StatusOK, + }, + "", + nil, + "", + }, + { + &http.Response{ + StatusCode: http.StatusInternalServerError, + }, + `{"error":{}}`, + &Error{ + Code: http.StatusInternalServerError, + Body: `{"error":{}}`, + }, + `googleapi: got HTTP response code 500 with body: {"error":{}}`, + }, + { + &http.Response{ + StatusCode: http.StatusNotFound, + }, + `{"error":{"message":"Error message for StatusNotFound."}}`, + &Error{ + Code: http.StatusNotFound, + Message: "Error message for StatusNotFound.", + Body: `{"error":{"message":"Error message for StatusNotFound."}}`, + }, + "googleapi: Error 404: Error message for StatusNotFound.", + }, + { + 
&http.Response{ + StatusCode: http.StatusBadRequest, + }, + `{"error":"invalid_token","error_description":"Invalid Value"}`, + &Error{ + Code: http.StatusBadRequest, + Body: `{"error":"invalid_token","error_description":"Invalid Value"}`, + }, + `googleapi: got HTTP response code 400 with body: {"error":"invalid_token","error_description":"Invalid Value"}`, + }, + { + &http.Response{ + StatusCode: http.StatusBadRequest, + }, + `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`, + &Error{ + Code: http.StatusBadRequest, + Errors: []ErrorItem{ + { + Reason: "keyInvalid", + Message: "Bad Request", + }, + }, + Body: `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`, + Message: "Bad Request", + }, + "googleapi: Error 400: Bad Request, keyInvalid", + }, +} + +func TestCheckResponse(t *testing.T) { + for _, test := range checkResponseTests { + res := test.in + if test.bodyText != "" { + res.Body = ioutil.NopCloser(strings.NewReader(test.bodyText)) + } + g := CheckResponse(res) + if !reflect.DeepEqual(g, test.want) { + t.Errorf("CheckResponse: got %v, want %v", g, test.want) + gotJson, err := json.Marshal(g) + if err != nil { + t.Error(err) + } + wantJson, err := json.Marshal(test.want) + if err != nil { + t.Error(err) + } + t.Errorf("json(got): %q\njson(want): %q", string(gotJson), string(wantJson)) + } + if g != nil && g.Error() != test.errText { + t.Errorf("CheckResponse: unexpected error message.\nGot: %q\nwant: %q", g, test.errText) + } + } +} + +type VariantPoint struct { + Type string + Coordinates []float64 +} + +type VariantTest struct { + in map[string]interface{} + result bool + want VariantPoint +} + +var coords = []interface{}{1.0, 2.0} + +var variantTests = []VariantTest{ + { + in: map[string]interface{}{ + "type": "Point", + "coordinates": coords, + }, + result: true, + want: VariantPoint{ + Type: "Point", + Coordinates: []float64{1.0, 2.0}, + }, + }, + { + in: map[string]interface{}{ + "type": "Point", + "bogus": coords, + }, + result: true, + want: VariantPoint{ + Type: "Point", + }, + }, +} + +func TestVariantType(t *testing.T) { + for _, test := range variantTests { + if g := VariantType(test.in); g != test.want.Type { + t.Errorf("VariantType(%v): got %v, want %v", test.in, g, test.want.Type) + } + } +} + +func TestConvertVariant(t *testing.T) { + for _, test := range variantTests { + g := VariantPoint{} + r := ConvertVariant(test.in, &g) + if r != test.result { + t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, r, test.result) + } + if !reflect.DeepEqual(g, test.want) { + t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, g, test.want) + } + } +} + +type unexpectedReader struct{} + +func (unexpectedReader) Read([]byte) (int, error) { + return 0, fmt.Errorf("unexpected read in test.") +} + +var contentRangeRE = regexp.MustCompile(`^bytes (\d+)\-(\d+)/(\d+)$`) + +func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) { + t.req = req + if rng := req.Header.Get("Content-Range"); rng != "" && !strings.HasPrefix(rng, "bytes */") { // Read the data + m := contentRangeRE.FindStringSubmatch(rng) + if len(m) != 4 { + return nil, fmt.Errorf("unable to parse content range: %v", rng) + } + start, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse content range: %v", rng) + } + end, err := strconv.ParseInt(m[2], 10, 64) + if err != nil { + return nil, 
fmt.Errorf("unable to parse content range: %v", rng) + } + totalSize, err := strconv.ParseInt(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse content range: %v", rng) + } + partialSize := end - start + 1 + t.buf, err = ioutil.ReadAll(req.Body) + if err != nil || int64(len(t.buf)) != partialSize { + return nil, fmt.Errorf("unable to read %v bytes from request data, n=%v: %v", partialSize, len(t.buf), err) + } + if totalSize == end+1 { + t.statusCode = 200 // signify completion of transfer + } + } + f := ioutil.NopCloser(unexpectedReader{}) + res := &http.Response{ + Body: f, + StatusCode: t.statusCode, + Header: http.Header{}, + } + if t.rangeVal != "" { + res.Header.Set("Range", t.rangeVal) + } + return res, nil +} + +type testTransport struct { + req *http.Request + statusCode int + rangeVal string + want int64 + buf []byte +} + +var statusTests = []*testTransport{ + &testTransport{statusCode: 308, want: 0}, + &testTransport{statusCode: 308, rangeVal: "bytes=0-0", want: 1}, + &testTransport{statusCode: 308, rangeVal: "bytes=0-42", want: 43}, +} + +func TestTransferStatus(t *testing.T) { + for _, tr := range statusTests { + rx := &ResumableUpload{ + Client: &http.Client{Transport: tr}, + } + g, _, err := rx.transferStatus() + if err != nil { + t.Error(err) + } + if g != tr.want { + t.Errorf("transferStatus got %v, want %v", g, tr.want) + } + } +} + +func (t *interruptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + t.req = req + if rng := req.Header.Get("Content-Range"); rng != "" && !strings.HasPrefix(rng, "bytes */") { + t.interruptCount += 1 + if t.interruptCount%7 == 0 { // Respond with a "service unavailable" error + res := &http.Response{ + StatusCode: http.StatusServiceUnavailable, + Header: http.Header{}, + } + t.rangeVal = fmt.Sprintf("bytes=0-%v", len(t.buf)-1) // Set the response for next time + return res, nil + } + m := contentRangeRE.FindStringSubmatch(rng) + if len(m) != 4 { + return nil, fmt.Errorf("unable to parse content range: %v", rng) + } + start, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse content range: %v", rng) + } + end, err := strconv.ParseInt(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse content range: %v", rng) + } + totalSize, err := strconv.ParseInt(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse content range: %v", rng) + } + partialSize := end - start + 1 + buf, err := ioutil.ReadAll(req.Body) + if err != nil || int64(len(buf)) != partialSize { + return nil, fmt.Errorf("unable to read %v bytes from request data, n=%v: %v", partialSize, len(buf), err) + } + t.buf = append(t.buf, buf...) 
+		if totalSize == end+1 {
+			t.statusCode = 200 // signify completion of transfer
+		}
+	}
+	f := ioutil.NopCloser(unexpectedReader{})
+	res := &http.Response{
+		Body:       f,
+		StatusCode: t.statusCode,
+		Header:     http.Header{},
+	}
+	if t.rangeVal != "" {
+		res.Header.Set("Range", t.rangeVal)
+	}
+	return res, nil
+}
+
+type interruptedTransport struct {
+	req            *http.Request
+	statusCode     int
+	rangeVal       string
+	interruptCount int
+	buf            []byte
+	progressBuf    string
+}
+
+func (tr *interruptedTransport) ProgressUpdate(current, total int64) {
+	tr.progressBuf += fmt.Sprintf("%v, %v\n", current, total)
+}
+
+func TestInterruptedTransferChunks(t *testing.T) {
+	f, err := os.Open("googleapi.go")
+	if err != nil {
+		t.Fatalf("unable to open googleapi.go: %v", err)
+	}
+	defer f.Close()
+	slurp, err := ioutil.ReadAll(f)
+	if err != nil {
+		t.Fatalf("unable to slurp file: %v", err)
+	}
+	st, err := f.Stat()
+	if err != nil {
+		t.Fatalf("unable to stat googleapi.go: %v", err)
+	}
+	tr := &interruptedTransport{
+		statusCode: 308,
+		buf:        make([]byte, 0, st.Size()),
+	}
+	oldChunkSize := chunkSize
+	defer func() { chunkSize = oldChunkSize }()
+	chunkSize = 100 // override to process small chunks for test.
+
+	sleep = func(time.Duration) {} // override time.Sleep
+	rx := &ResumableUpload{
+		Client:        &http.Client{Transport: tr},
+		Media:         f,
+		MediaType:     "text/plain",
+		ContentLength: st.Size(),
+		Callback:      tr.ProgressUpdate,
+	}
+	res, err := rx.Upload(context.Background())
+	if err != nil || res == nil || res.StatusCode != http.StatusOK {
+		if res == nil {
+			t.Errorf("transferChunks not successful, res=nil: %v", err)
+		} else {
+			t.Errorf("transferChunks not successful, statusCode=%v: %v", res.StatusCode, err)
+		}
+	}
+	if len(tr.buf) != len(slurp) || !bytes.Equal(tr.buf, slurp) {
+		t.Errorf("transferred file corrupted:\ngot %s\nwant %s", tr.buf, slurp)
+	}
+	w := ""
+	for i := chunkSize; i <= st.Size(); i += chunkSize {
+		w += fmt.Sprintf("%v, %v\n", i, st.Size())
+	}
+	if st.Size()%chunkSize != 0 {
+		w += fmt.Sprintf("%v, %v\n", st.Size(), st.Size())
+	}
+	if tr.progressBuf != w {
+		t.Errorf("progress update error, got %v, want %v", tr.progressBuf, w)
+	}
+}
+
+func TestCancelUpload(t *testing.T) {
+	f, err := os.Open("googleapi.go")
+	if err != nil {
+		t.Fatalf("unable to open googleapi.go: %v", err)
+	}
+	defer f.Close()
+	st, err := f.Stat()
+	if err != nil {
+		t.Fatalf("unable to stat googleapi.go: %v", err)
+	}
+	tr := &interruptedTransport{
+		statusCode: 308,
+		buf:        make([]byte, 0, st.Size()),
+	}
+	oldChunkSize := chunkSize
+	defer func() { chunkSize = oldChunkSize }()
+	chunkSize = 100 // override to process small chunks for test.
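The fake transports in these tests recover chunk offsets by parsing the Content-Range request header against contentRangeRE. For orientation, a tiny standalone sketch of that header's shape; the contentRange helper is ours, not part of the package:

	package main

	import "fmt"

	// contentRange builds the value matched by contentRangeRE above:
	// "bytes <first>-<last>/<total>", where first and last are inclusive
	// byte offsets within a body of total bytes.
	func contentRange(first, last, total int64) string {
		return fmt.Sprintf("bytes %d-%d/%d", first, last, total)
	}

	func main() {
		// The second 100-byte chunk of a 250-byte upload.
		fmt.Println(contentRange(100, 199, 250)) // bytes 100-199/250
	}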
+ + sleep = func(time.Duration) {} // override time.Sleep + rx := &ResumableUpload{ + Client: &http.Client{Transport: tr}, + Media: f, + MediaType: "text/plain", + ContentLength: st.Size(), + Callback: tr.ProgressUpdate, + } + ctx, cancelFunc := context.WithCancel(context.Background()) + cancelFunc() // stop the upload that hasn't started yet + res, err := rx.Upload(ctx) + if err == nil || res == nil || res.StatusCode != http.StatusRequestTimeout { + if res == nil { + t.Errorf("transferChunks not successful, got res=nil, err=%v, want StatusRequestTimeout", err) + } else { + t.Errorf("transferChunks not successful, got statusCode=%v, err=%v, want StatusRequestTimeout", res.StatusCode, err) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 000000000..de9c88cb6 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go new file mode 100644 index 000000000..8a84813fe --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -0,0 +1,359 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). 
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+//	template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+//	values := make(map[string]interface{})
+//	values["user"] = "jtacoma"
+//	values["repo"] = "uritemplates"
+//	expanded, _ := template.Expand(values)
+//	fmt.Print(expanded)
+//
+package uritemplates
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+	reserved   = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+	validname  = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+	hex        = []byte("0123456789ABCDEF")
+)
+
+func pctEncode(src []byte) []byte {
+	dst := make([]byte, len(src)*3)
+	for i, b := range src {
+		buf := dst[i*3 : i*3+3]
+		buf[0] = 0x25
+		buf[1] = hex[b/16]
+		buf[2] = hex[b%16]
+	}
+	return dst
+}
+
+func escape(s string, allowReserved bool) (escaped string) {
+	if allowReserved {
+		escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+	} else {
+		escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+	}
+	return escaped
+}
+
+// A UriTemplate is a parsed representation of a URI template.
+type UriTemplate struct {
+	raw   string
+	parts []templatePart
+}
+
+// Parse parses a URI template string into a UriTemplate object.
+func Parse(rawtemplate string) (template *UriTemplate, err error) {
+	template = new(UriTemplate)
+	template.raw = rawtemplate
+	split := strings.Split(rawtemplate, "{")
+	template.parts = make([]templatePart, len(split)*2-1)
+	for i, s := range split {
+		if i == 0 {
+			if strings.Contains(s, "}") {
+				err = errors.New("unexpected }")
+				break
+			}
+			template.parts[i].raw = s
+		} else {
+			subsplit := strings.Split(s, "}")
+			if len(subsplit) != 2 {
+				err = errors.New("malformed template")
+				break
+			}
+			expression := subsplit[0]
+			template.parts[i*2-1], err = parseExpression(expression)
+			if err != nil {
+				break
+			}
+			template.parts[i*2].raw = subsplit[1]
+		}
+	}
+	if err != nil {
+		template = nil
+	}
+	return template, err
+}
+
+type templatePart struct {
+	raw           string
+	terms         []templateTerm
+	first         string
+	sep           string
+	named         bool
+	ifemp         string
+	allowReserved bool
+}
+
+type templateTerm struct {
+	name     string
+	explode  bool
+	truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+	switch expression[0] {
+	case '+':
+		result.sep = ","
+		result.allowReserved = true
+		expression = expression[1:]
+	case '.':
+		result.first = "."
+		result.sep = "."
+		expression = expression[1:]
+	case '/':
+		result.first = "/"
+		result.sep = "/"
+		expression = expression[1:]
+	case ';':
+		result.first = ";"
+		result.sep = ";"
+		result.named = true
+		expression = expression[1:]
+	case '?':
+		result.first = "?"
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. +func (self *UriTemplate) Expand(value interface{}) (string, error) { + values, ismap := value.(map[string]interface{}) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + 
self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 000000000..399ef4623 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 000000000..eca1ea250 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. 
+ Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go new file mode 100644 index 000000000..a02b4b071 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go @@ -0,0 +1,182 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. +type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. +type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
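The quoted-string number types in this file exist because many JSON readers (JavaScript in particular) store numbers as IEEE-754 doubles, so integers above 2^53 would silently lose precision as bare JSON numbers; the API therefore transports them as quoted strings, and these slice types hide that detail from callers. A minimal usage sketch, assuming the vendored import path:

	package main

	import (
		"encoding/json"
		"fmt"

		"google.golang.org/api/googleapi"
	)

	func main() {
		// 9007199254740993 is 2^53+1 and would not survive a round trip
		// through a double-based JSON reader as a bare number.
		var ids googleapi.Int64s
		if err := json.Unmarshal([]byte(`["1","9007199254740993"]`), &ids); err != nil {
			panic(err)
		}
		out, _ := json.Marshal(ids)
		fmt.Println(string(out)) // ["1","9007199254740993"]
	}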
+type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/types_test.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/types_test.go new file mode 100644 index 000000000..a6b204515 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/types_test.go @@ -0,0 +1,44 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
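The pointer helpers at the bottom of types.go support optional fields: a nil pointer means "not set", which a plain bool or int64 zero value cannot express. A short sketch of the intended call pattern; the BucketSettings type here is hypothetical, standing in for a generated request struct:

	package main

	import (
		"fmt"

		"google.golang.org/api/googleapi"
	)

	// BucketSettings is a hypothetical struct with optional fields,
	// in the style of the generated API types: nil means "not set".
	type BucketSettings struct {
		VersioningEnabled *bool
		MaxResults        *int64
		Location          *string
	}

	func main() {
		s := BucketSettings{
			VersioningEnabled: googleapi.Bool(true),
			MaxResults:        googleapi.Int64(100),
			// Location is left nil, i.e. not set.
		}
		fmt.Println(*s.VersioningEnabled, *s.MaxResults) // true 100
	}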
+ +package googleapi + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestTypes(t *testing.T) { + type T struct { + I32 Int32s + I64 Int64s + U32 Uint32s + U64 Uint64s + F64 Float64s + } + v := &T{ + I32: Int32s{-1, 2, 3}, + I64: Int64s{-1, 2, 1 << 33}, + U32: Uint32s{1, 2}, + U64: Uint64s{1, 2, 1 << 33}, + F64: Float64s{1.5, 3.33}, + } + got, err := json.Marshal(v) + if err != nil { + t.Fatal(err) + } + want := `{"I32":["-1","2","3"],"I64":["-1","2","8589934592"],"U32":["1","2"],"U64":["1","2","8589934592"],"F64":["1.5","3.33"]}` + if string(got) != want { + t.Fatalf("Marshal mismatch.\n got: %s\nwant: %s\n", got, want) + } + + v2 := new(T) + if err := json.Unmarshal(got, v2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(v, v2) { + t.Fatalf("Unmarshal didn't produce same results.\n got: %#v\nwant: %#v\n", v, v2) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/storage/v1/storage-api.json b/Godeps/_workspace/src/google.golang.org/api/storage/v1/storage-api.json new file mode 100644 index 000000000..2559a45a4 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/storage/v1/storage-api.json @@ -0,0 +1,2824 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/9Bg7_QKT-vp8XVgawS-R0QOOugY\"", + "discoveryVersion": "v1", + "id": "storage:v1", + "name": "storage", + "version": "v1", + "revision": "20150826", + "title": "Cloud Storage API", + "description": "Lets you store and retrieve potentially-large, immutable data objects.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "labels": [ + "labs" + ], + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/storage/v1/", + "basePath": "/storage/v1/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "storage/v1/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "MESSAGE UNDER CONSTRUCTION View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "Bucket": { + "id": "Bucket", + "type": "object", + "description": "A bucket.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the bucket.", + "items": { + "$ref": "BucketAccessControl" + }, + "annotations": { + "required": [ + "storage.buckets.update" + ] + } + }, + "cors": { + "type": "array", + "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", + "items": { + "type": "object", + "properties": { + "maxAgeSeconds": { + "type": "integer", + "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", + "format": "int32" + }, + "method": { + "type": "array", + "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", + "items": { + "type": "string" + } + }, + "origin": { + "type": "array", + "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", + "items": { + "type": "string" + } + }, + "responseHeader": { + "type": "array", + "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", + "items": { + "type": "string" + } + } + } + } + }, + "defaultObjectAcl": { + "type": "array", + "description": "Default access controls to apply to new objects when no ACL is provided.", + "items": { + "$ref": "ObjectAccessControl" + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the bucket." + }, + "id": { + "type": "string", + "description": "The ID of the bucket." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For buckets, this is always storage#bucket.", + "default": "storage#bucket" + }, + "lifecycle": { + "type": "object", + "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "properties": { + "rule": { + "type": "array", + "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", + "items": { + "type": "object", + "properties": { + "action": { + "type": "object", + "description": "The action to take.", + "properties": { + "type": { + "type": "string", + "description": "Type of the action. Currently, only Delete is supported." 
+ } + } + }, + "condition": { + "type": "object", + "description": "The condition(s) under which the action will be taken.", + "properties": { + "age": { + "type": "integer", + "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", + "format": "int32" + }, + "createdBefore": { + "type": "string", + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", + "format": "date" + }, + "isLive": { + "type": "boolean", + "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects." + }, + "numNewerVersions": { + "type": "integer", + "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", + "format": "int32" + } + } + } + } + } + } + } + }, + "location": { + "type": "string", + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list." + }, + "logging": { + "type": "object", + "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", + "properties": { + "logBucket": { + "type": "string", + "description": "The destination bucket where the current bucket's logs should be placed." + }, + "logObjectPrefix": { + "type": "string", + "description": "A prefix for log object names." + } + } + }, + "metageneration": { + "type": "string", + "description": "The metadata generation of this bucket.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of the bucket.", + "annotations": { + "required": [ + "storage.buckets.insert" + ] + } + }, + "owner": { + "type": "object", + "description": "The owner of the bucket. This is always the project team's owner group.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form project-owner-projectId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." + } + } + }, + "projectNumber": { + "type": "string", + "description": "The project number of the project the bucket belongs to.", + "format": "uint64" + }, + "selfLink": { + "type": "string", + "description": "The URI of this bucket." + }, + "storageClass": { + "type": "string", + "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes." + }, + "timeCreated": { + "type": "string", + "description": "Creation time of the bucket in RFC 3339 format.", + "format": "date-time" + }, + "versioning": { + "type": "object", + "description": "The bucket's versioning configuration.", + "properties": { + "enabled": { + "type": "boolean", + "description": "While set to true, versioning is fully enabled for this bucket." 
+ } + } + }, + "website": { + "type": "object", + "description": "The bucket's website configuration.", + "properties": { + "mainPageSuffix": { + "type": "string", + "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories." + }, + "notFoundPage": { + "type": "string", + "description": "The custom object to return when a requested resource is not found." + } + } + } + } + }, + "BucketAccessControl": { + "id": "BucketAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", + "default": "storage#bucketAccessControl" + }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team. Can be owners, editors, or viewers." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." + } + } + }, + "BucketAccessControls": { + "id": "BucketAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "BucketAccessControl" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", + "default": "storage#bucketAccessControls" + } + } + }, + "Buckets": { + "id": "Buckets", + "type": "object", + "description": "A list of buckets.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Bucket" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. 
For lists of buckets, this is always storage#buckets.", + "default": "storage#buckets" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." + } + } + }, + "Channel": { + "id": "Channel", + "type": "object", + "description": "An notification channel used to watch for resource changes.", + "properties": { + "address": { + "type": "string", + "description": "The address where notifications are delivered for this channel." + }, + "expiration": { + "type": "string", + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "A UUID or similar unique string that identifies this channel." + }, + "kind": { + "type": "string", + "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", + "default": "api#channel" + }, + "params": { + "type": "object", + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "additionalProperties": { + "type": "string", + "description": "Declares a new parameter by name." + } + }, + "payload": { + "type": "boolean", + "description": "A Boolean value to indicate whether payload is wanted. Optional." + }, + "resourceId": { + "type": "string", + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." + }, + "resourceUri": { + "type": "string", + "description": "A version-specific identifier for the watched resource." + }, + "token": { + "type": "string", + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." + }, + "type": { + "type": "string", + "description": "The type of delivery mechanism used for this channel." + } + } + }, + "ComposeRequest": { + "id": "ComposeRequest", + "type": "object", + "description": "A Compose request.", + "properties": { + "destination": { + "$ref": "Object", + "description": "Properties of the resulting object." + }, + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#composeRequest" + }, + "sourceObjects": { + "type": "array", + "description": "The list of source objects that will be concatenated into a single object.", + "items": { + "type": "object", + "properties": { + "generation": { + "type": "string", + "description": "The generation of this object to use as the source.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The source object's name. The source object's bucket is implicitly the destination bucket.", + "annotations": { + "required": [ + "storage.objects.compose" + ] + } + }, + "objectPreconditions": { + "type": "object", + "description": "Conditions that must be met for this operation to execute.", + "properties": { + "ifGenerationMatch": { + "type": "string", + "description": "Only perform the composition if the generation of the source object that would be used matches this value. 
If this value and a generation are both specified, they must be the same value or the call will fail.", + "format": "int64" + } + } + } + } + }, + "annotations": { + "required": [ + "storage.objects.compose" + ] + } + } + } + }, + "Object": { + "id": "Object", + "type": "object", + "description": "An object.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the object.", + "items": { + "$ref": "ObjectAccessControl" + }, + "annotations": { + "required": [ + "storage.objects.update" + ] + } + }, + "bucket": { + "type": "string", + "description": "The name of the bucket containing this object." + }, + "cacheControl": { + "type": "string", + "description": "Cache-Control directive for the object data." + }, + "componentCount": { + "type": "integer", + "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", + "format": "int32" + }, + "contentDisposition": { + "type": "string", + "description": "Content-Disposition of the object data." + }, + "contentEncoding": { + "type": "string", + "description": "Content-Encoding of the object data." + }, + "contentLanguage": { + "type": "string", + "description": "Content-Language of the object data." + }, + "contentType": { + "type": "string", + "description": "Content-Type of the object data.", + "annotations": { + "required": [ + "storage.objects.insert", + "storage.objects.update" + ] + } + }, + "crc32c": { + "type": "string", + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the object." + }, + "generation": { + "type": "string", + "description": "The content generation of this object. Used for object versioning.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the object." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For objects, this is always storage#object.", + "default": "storage#object" + }, + "md5Hash": { + "type": "string", + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices." + }, + "mediaLink": { + "type": "string", + "description": "Media download link." + }, + "metadata": { + "type": "object", + "description": "User-provided metadata, in key/value pairs.", + "additionalProperties": { + "type": "string", + "description": "An individual metadata entry." + } + }, + "metageneration": { + "type": "string", + "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of this object. Required if not specified by URL parameter." + }, + "owner": { + "type": "object", + "description": "The owner of the object. This will always be the uploader of the object.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form user-userId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." + } + } + }, + "selfLink": { + "type": "string", + "description": "The link to this object." 
+ }, + "size": { + "type": "string", + "description": "Content-Length of the data in bytes.", + "format": "uint64" + }, + "storageClass": { + "type": "string", + "description": "Storage class of the object." + }, + "timeDeleted": { + "type": "string", + "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "format": "date-time" + }, + "updated": { + "type": "string", + "description": "The creation or modification time of the object in RFC 3339 format. For buckets with versioning enabled, changing an object's metadata does not change this property.", + "format": "date-time" + } + } + }, + "ObjectAccessControl": { + "id": "ObjectAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "generation": { + "type": "string", + "description": "The content generation of the object.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", + "default": "storage#objectAccessControl" + }, + "object": { + "type": "string", + "description": "The name of the object." + }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team. Can be owners, editors, or viewers." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity. Can be READER or OWNER." + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." + } + } + }, + "ObjectAccessControls": { + "id": "ObjectAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "type": "any" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. 
For lists of object access control entries, this is always storage#objectAccessControls.", + "default": "storage#objectAccessControls" + } + } + }, + "Objects": { + "id": "Objects", + "type": "object", + "description": "A list of objects.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Object" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of objects, this is always storage#objects.", + "default": "storage#objects" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." + }, + "prefixes": { + "type": "array", + "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", + "items": { + "type": "string" + } + } + } + }, + "RewriteResponse": { + "id": "RewriteResponse", + "type": "object", + "description": "A rewrite response.", + "properties": { + "done": { + "type": "boolean", + "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response." + }, + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#rewriteResponse" + }, + "objectSize": { + "type": "string", + "description": "The total size of the object being copied in bytes. This property is always present in the response.", + "format": "uint64" + }, + "resource": { + "$ref": "Object", + "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." + }, + "rewriteToken": { + "type": "string", + "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy." + }, + "totalBytesRewritten": { + "type": "string", + "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", + "format": "uint64" + } + } + } + }, + "resources": { + "bucketAccessControls": { + "methods": { + "delete": { + "id": "storage.bucketAccessControls.delete", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.bucketAccessControls.get", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.bucketAccessControls.insert", + "path": "b/{bucket}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.bucketAccessControls.list", + "path": "b/{bucket}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "BucketAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.bucketAccessControls.patch", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.bucketAccessControls.update", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "buckets": { + "methods": { + "delete": { + "id": "storage.buckets.delete", + "path": "b/{bucket}", + "httpMethod": "DELETE", + "description": "Permanently deletes an empty bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration does not match this value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.buckets.get", + "path": "b/{bucket}", + "httpMethod": "GET", + "description": "Returns metadata for the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "id": "storage.buckets.insert", + "path": "b", + "httpMethod": "POST", + "description": "Creates a new bucket.", + "parameters": { + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "storage.buckets.list", + "path": "b", + "httpMethod": "GET", + "description": "Retrieves a list of buckets for a given project.", + "parameters": { + "maxResults": { + "type": "integer", + "description": "Maximum number of buckets to return.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to buckets whose names begin with this prefix.", + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Buckets" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "patch": { + "id": "storage.buckets.patch", + "path": "b/{bucket}", + "httpMethod": "PATCH", + "description": "Updates a bucket. 
This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.buckets.update", + "path": "b/{bucket}", + "httpMethod": "PUT", + "description": "Updates a bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "id": "storage.channels.stop", + "path": "channels/stop", + "httpMethod": "POST", + "description": "Stop watching resources through this channel", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "defaultObjectAccessControls": { + "methods": { + "delete": { + "id": "storage.defaultObjectAccessControls.delete", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.defaultObjectAccessControls.get", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "GET", + "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.defaultObjectAccessControls.insert", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "POST", + "description": "Creates a new default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.defaultObjectAccessControls.list", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "GET", + "description": "Retrieves default object ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.defaultObjectAccessControls.patch", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PATCH", + "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.defaultObjectAccessControls.update", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PUT", + "description": "Updates a default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objectAccessControls": { + "methods": { + "delete": { + "id": "storage.objectAccessControls.delete", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.objectAccessControls.get", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.objectAccessControls.insert", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.objectAccessControls.list", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.objectAccessControls.patch", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.objectAccessControls.update", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objects": { + "methods": { + "compose": { + "id": "storage.objects.compose", + "path": "b/{destinationBucket}/o/{destinationObject}/compose", + "httpMethod": "POST", + "description": "Concatenates a list of existing objects into a new object in the same bucket.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "ComposeRequest" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true + }, + "copy": { + "id": "storage.objects.copy", + "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Copies a source object to a destination object. Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true + }, + "delete": { + "id": "storage.objects.delete", + "path": "b/{bucket}/o/{object}", + "httpMethod": "DELETE", + "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.objects.get", + "path": "b/{bucket}/o/{object}", + "httpMethod": "GET", + "description": "Retrieves an object or its metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true + }, + "insert": { + "id": "storage.objects.insert", + "path": "b/{bucket}/o", + "httpMethod": "POST", + "description": "Stores a new object and metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "contentEncoding": { + "type": "string", + "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. 
This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "name": { + "type": "string", + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/storage/v1/b/{bucket}/o" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/storage/v1/b/{bucket}/o" + } + } + } + }, + "list": { + "id": "storage.objects.list", + "path": "b/{bucket}/o", + "httpMethod": "GET", + "description": "Retrieves a list of objects matching the criteria.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. 
items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Objects" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + }, + "patch": { + "id": "storage.objects.patch", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PATCH", + "description": "Updates an object's metadata. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "rewrite": { + "id": "storage.objects.rewrite", + "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "maxBytesRewrittenPerCall": { + "type": "string", + "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "rewriteToken": { + "type": "string", + "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "RewriteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.objects.update", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PUT", + "description": "Updates an object's metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true + }, + "watchAll": { + "id": "storage.objects.watchAll", + "path": "b/{bucket}/o/watch", + "httpMethod": "POST", + "description": "Watch for changes on all objects in a bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/storage/v1/storage-gen.go b/Godeps/_workspace/src/google.golang.org/api/storage/v1/storage-gen.go new file mode 100644 index 000000000..1a0d6b4fc --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/storage/v1/storage-gen.go @@ -0,0 +1,6411 @@ +// Package storage provides access to the Cloud Storage API. +// +// See https://developers.google.com/storage/docs/json_api/ +// +// Usage example: +// +// import "google.golang.org/api/storage/v1" +// ... +// storageService, err := storage.New(oauthHttpClient) +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Background + +const apiId = "storage:v1" +const apiName = "storage" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/storage/v1/" + +// OAuth2 scopes used by this API. 
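+//
+// For illustration, a caller could build an authenticated client with the
+// vendored golang.org/x/oauth2 package and pass it to New below (a sketch;
+// the access token value is hypothetical, and it must have been issued for
+// one of the scopes listed here):
+//
+//	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "ya29.hypothetical"})
+//	svc, err := New(oauth2.NewClient(oauth2.NoContext, ts))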
+const (
+ // View and manage your data across Google Cloud Platform services
+ CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
+
+ // View your data across Google Cloud
+ // Platform services
+ CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
+
+ // Manage your data and permissions in Google Cloud Storage
+ DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
+
+ // View your data in Google Cloud Storage
+ DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
+
+ // Manage your data in Google Cloud Storage
+ DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
+)
+
+func New(client *http.Client) (*Service, error) {
+ if client == nil {
+ return nil, errors.New("client is nil")
+ }
+ s := &Service{client: client, BasePath: basePath}
+ s.BucketAccessControls = NewBucketAccessControlsService(s)
+ s.Buckets = NewBucketsService(s)
+ s.Channels = NewChannelsService(s)
+ s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
+ s.ObjectAccessControls = NewObjectAccessControlsService(s)
+ s.Objects = NewObjectsService(s)
+ return s, nil
+}
+
+type Service struct {
+ client *http.Client
+ BasePath string // API endpoint base URL
+ UserAgent string // optional additional User-Agent fragment
+
+ BucketAccessControls *BucketAccessControlsService
+
+ Buckets *BucketsService
+
+ Channels *ChannelsService
+
+ DefaultObjectAccessControls *DefaultObjectAccessControlsService
+
+ ObjectAccessControls *ObjectAccessControlsService
+
+ Objects *ObjectsService
+}
+
+func (s *Service) userAgent() string {
+ if s.UserAgent == "" {
+ return googleapi.UserAgent
+ }
+ return googleapi.UserAgent + " " + s.UserAgent
+}
+
+func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {
+ rs := &BucketAccessControlsService{s: s}
+ return rs
+}
+
+type BucketAccessControlsService struct {
+ s *Service
+}
+
+func NewBucketsService(s *Service) *BucketsService {
+ rs := &BucketsService{s: s}
+ return rs
+}
+
+type BucketsService struct {
+ s *Service
+}
+
+func NewChannelsService(s *Service) *ChannelsService {
+ rs := &ChannelsService{s: s}
+ return rs
+}
+
+type ChannelsService struct {
+ s *Service
+}
+
+func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService {
+ rs := &DefaultObjectAccessControlsService{s: s}
+ return rs
+}
+
+type DefaultObjectAccessControlsService struct {
+ s *Service
+}
+
+func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
+ rs := &ObjectAccessControlsService{s: s}
+ return rs
+}
+
+type ObjectAccessControlsService struct {
+ s *Service
+}
+
+func NewObjectsService(s *Service) *ObjectsService {
+ rs := &ObjectsService{s: s}
+ return rs
+}
+
+type ObjectsService struct {
+ s *Service
+}
+
+// Bucket: A bucket.
+type Bucket struct {
+ // Acl: Access controls on the bucket.
+ Acl []*BucketAccessControl `json:"acl,omitempty"`
+
+ // Cors: The bucket's Cross-Origin Resource Sharing (CORS)
+ // configuration.
+ Cors []*BucketCors `json:"cors,omitempty"`
+
+ // DefaultObjectAcl: Default access controls to apply to new objects
+ // when no ACL is provided.
+ DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`
+
+ // Etag: HTTP 1.1 Entity tag for the bucket.
+ Etag string `json:"etag,omitempty"`
+
+ // Id: The ID of the bucket.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The kind of item this is.
For buckets, this is always + // storage#bucket. + Kind string `json:"kind,omitempty"` + + // Lifecycle: The bucket's lifecycle configuration. See lifecycle + // management for more information. + Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` + + // Location: The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to + // US. See the developer's guide for the authoritative list. + Location string `json:"location,omitempty"` + + // Logging: The bucket's logging configuration, which defines the + // destination bucket and optional name prefix for the current bucket's + // logs. + Logging *BucketLogging `json:"logging,omitempty"` + + // Metageneration: The metadata generation of this bucket. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the bucket. + Name string `json:"name,omitempty"` + + // Owner: The owner of the bucket. This is always the project team's + // owner group. + Owner *BucketOwner `json:"owner,omitempty"` + + // ProjectNumber: The project number of the project the bucket belongs + // to. + ProjectNumber uint64 `json:"projectNumber,omitempty,string"` + + // SelfLink: The URI of this bucket. + SelfLink string `json:"selfLink,omitempty"` + + // StorageClass: The bucket's storage class. This defines how objects in + // the bucket are stored and determines the SLA and the cost of storage. + // Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. + // Defaults to STANDARD. For more information, see storage classes. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: Creation time of the bucket in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Versioning: The bucket's versioning configuration. + Versioning *BucketVersioning `json:"versioning,omitempty"` + + // Website: The bucket's website configuration. + Website *BucketWebsite `json:"website,omitempty"` +} + +type BucketCors struct { + // MaxAgeSeconds: The value, in seconds, to return in the + // Access-Control-Max-Age header used in preflight responses. + MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` + + // Method: The list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Method []string `json:"method,omitempty"` + + // Origin: The list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". + Origin []string `json:"origin,omitempty"` + + // ResponseHeader: The list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeader []string `json:"responseHeader,omitempty"` +} + +// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle +// management for more information. +type BucketLifecycle struct { + // Rule: A lifecycle management rule, which is made of an action to take + // and the condition(s) under which the action will be taken. + Rule []*BucketLifecycleRule `json:"rule,omitempty"` +} + +type BucketLifecycleRule struct { + // Action: The action to take. + Action *BucketLifecycleRuleAction `json:"action,omitempty"` + + // Condition: The condition(s) under which the action will be taken. + Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` +} + +// BucketLifecycleRuleAction: The action to take. 
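+//
+// For illustration, a complete lifecycle rule that deletes objects older
+// than 30 days could be built from the surrounding types (a sketch; the
+// age is arbitrary):
+//
+//    rule := &BucketLifecycleRule{
+//        Action:    &BucketLifecycleRuleAction{Type: "Delete"},
+//        Condition: &BucketLifecycleRuleCondition{Age: 30},
+//    }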
+type BucketLifecycleRuleAction struct { + // Type: Type of the action. Currently, only Delete is supported. + Type string `json:"type,omitempty"` +} + +// BucketLifecycleRuleCondition: The condition(s) under which the action +// will be taken. +type BucketLifecycleRuleCondition struct { + // Age: Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + Age int64 `json:"age,omitempty"` + + // CreatedBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when an object + // is created before midnight of the specified date in UTC. + CreatedBefore string `json:"createdBefore,omitempty"` + + // IsLive: Relevant only for versioned objects. If the value is true, + // this condition matches live objects; if the value is false, it + // matches archived objects. + IsLive bool `json:"isLive,omitempty"` + + // NumNewerVersions: Relevant only for versioned objects. If the value + // is N, this condition is satisfied when there are at least N versions + // (including the live version) newer than this version of the object. + NumNewerVersions int64 `json:"numNewerVersions,omitempty"` +} + +// BucketLogging: The bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // LogBucket: The destination bucket where the current bucket's logs + // should be placed. + LogBucket string `json:"logBucket,omitempty"` + + // LogObjectPrefix: A prefix for log object names. + LogObjectPrefix string `json:"logObjectPrefix,omitempty"` +} + +// BucketOwner: The owner of the bucket. This is always the project +// team's owner group. +type BucketOwner struct { + // Entity: The entity, in the form project-owner-projectId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` +} + +// BucketVersioning: The bucket's versioning configuration. +type BucketVersioning struct { + // Enabled: While set to true, versioning is fully enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` +} + +// BucketWebsite: The bucket's website configuration. +type BucketWebsite struct { + // MainPageSuffix: Behaves as the bucket's directory index where missing + // objects are treated as potential directories. + MainPageSuffix string `json:"mainPageSuffix,omitempty"` + + // NotFoundPage: The custom object to return when a requested resource + // is not found. + NotFoundPage string `json:"notFoundPage,omitempty"` +} + +// BucketAccessControl: An access-control entry. +type BucketAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. 
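+//
+// For illustration (the group address is hypothetical), a grant giving a
+// group READER access on the bucket could be expressed as:
+//
+//    &BucketAccessControl{
+//        Entity: "group-example@googlegroups.com",
+//        Role:   "READER",
+//    }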
+ Entity string `json:"entity,omitempty"`
+
+ // EntityId: The ID for the entity, if any.
+ EntityId string `json:"entityId,omitempty"`
+
+ // Etag: HTTP 1.1 Entity tag for the access-control entry.
+ Etag string `json:"etag,omitempty"`
+
+ // Id: The ID of the access-control entry.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The kind of item this is. For bucket access control entries,
+ // this is always storage#bucketAccessControl.
+ Kind string `json:"kind,omitempty"`
+
+ // ProjectTeam: The project team associated with the entity, if any.
+ ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"`
+
+ // Role: The access permission for the entity. Can be READER, WRITER, or
+ // OWNER.
+ Role string `json:"role,omitempty"`
+
+ // SelfLink: The link to this access-control entry.
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// BucketAccessControlProjectTeam: The project team associated with the
+// entity, if any.
+type BucketAccessControlProjectTeam struct {
+ // ProjectNumber: The project number.
+ ProjectNumber string `json:"projectNumber,omitempty"`
+
+ // Team: The team. Can be owners, editors, or viewers.
+ Team string `json:"team,omitempty"`
+}
+
+// BucketAccessControls: An access-control list.
+type BucketAccessControls struct {
+ // Items: The list of items.
+ Items []*BucketAccessControl `json:"items,omitempty"`
+
+ // Kind: The kind of item this is. For lists of bucket access control
+ // entries, this is always storage#bucketAccessControls.
+ Kind string `json:"kind,omitempty"`
+}
+
+// Buckets: A list of buckets.
+type Buckets struct {
+ // Items: The list of items.
+ Items []*Bucket `json:"items,omitempty"`
+
+ // Kind: The kind of item this is. For lists of buckets, this is always
+ // storage#buckets.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+}
+
+// Channel: A notification channel used to watch for resource changes.
+type Channel struct {
+ // Address: The address where notifications are delivered for this
+ // channel.
+ Address string `json:"address,omitempty"`
+
+ // Expiration: Date and time of notification channel expiration,
+ // expressed as a Unix timestamp, in milliseconds. Optional.
+ Expiration int64 `json:"expiration,omitempty,string"`
+
+ // Id: A UUID or similar unique string that identifies this channel.
+ Id string `json:"id,omitempty"`
+
+ // Kind: Identifies this as a notification channel used to watch for
+ // changes to a resource. Value: the fixed string "api#channel".
+ Kind string `json:"kind,omitempty"`
+
+ // Params: Additional parameters controlling delivery channel behavior.
+ // Optional.
+ Params map[string]string `json:"params,omitempty"`
+
+ // Payload: A Boolean value to indicate whether payload is wanted.
+ // Optional.
+ Payload bool `json:"payload,omitempty"`
+
+ // ResourceId: An opaque ID that identifies the resource being watched
+ // on this channel. Stable across different API versions.
+ ResourceId string `json:"resourceId,omitempty"`
+
+ // ResourceUri: A version-specific identifier for the watched resource.
+ ResourceUri string `json:"resourceUri,omitempty"`
+
+ // Token: An arbitrary string delivered to the target address with each
+ // notification delivered over this channel. Optional.
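+//
+// For illustration, a channel passed to Objects.WatchAll might be
+// populated as follows (a sketch; the id, address, and token values are
+// hypothetical, and "web_hook" is the delivery type commonly used for
+// watch requests):
+//
+//    ch := &Channel{
+//        Id:      "01234567-89ab-cdef-0123-456789abcdef",
+//        Type:    "web_hook",
+//        Address: "https://example.com/notifications",
+//        Token:   "opaque-client-state",
+//    }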
+ Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. + Type string `json:"type,omitempty"` +} + +// ComposeRequest: A Compose request. +type ComposeRequest struct { + // Destination: Properties of the resulting object. + Destination *Object `json:"destination,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // SourceObjects: The list of source objects that will be concatenated + // into a single object. + SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` +} + +type ComposeRequestSourceObjects struct { + // Generation: The generation of this object to use as the source. + Generation int64 `json:"generation,omitempty,string"` + + // Name: The source object's name. The source object's bucket is + // implicitly the destination bucket. + Name string `json:"name,omitempty"` + + // ObjectPreconditions: Conditions that must be met for this operation + // to execute. + ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` +} + +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must +// be met for this operation to execute. +type ComposeRequestSourceObjectsObjectPreconditions struct { + // IfGenerationMatch: Only perform the composition if the generation of + // the source object that would be used matches this value. If this + // value and a generation are both specified, they must be the same + // value or the call will fail. + IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` +} + +// Object: An object. +type Object struct { + // Acl: Access controls on the object. + Acl []*ObjectAccessControl `json:"acl,omitempty"` + + // Bucket: The name of the bucket containing this object. + Bucket string `json:"bucket,omitempty"` + + // CacheControl: Cache-Control directive for the object data. + CacheControl string `json:"cacheControl,omitempty"` + + // ComponentCount: Number of underlying components that make up this + // object. Components are accumulated by compose operations. + ComponentCount int64 `json:"componentCount,omitempty"` + + // ContentDisposition: Content-Disposition of the object data. + ContentDisposition string `json:"contentDisposition,omitempty"` + + // ContentEncoding: Content-Encoding of the object data. + ContentEncoding string `json:"contentEncoding,omitempty"` + + // ContentLanguage: Content-Language of the object data. + ContentLanguage string `json:"contentLanguage,omitempty"` + + // ContentType: Content-Type of the object data. + ContentType string `json:"contentType,omitempty"` + + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; + // encoded using base64 in big-endian byte order. For more information + // about using the CRC32c checksum, see Hashes and ETags: Best + // Practices. + Crc32c string `json:"crc32c,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the object. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of this object. Used for object + // versioning. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the object. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For objects, this is always + // storage#object. + Kind string `json:"kind,omitempty"` + + // Md5Hash: MD5 hash of the data; encoded using base64. For more + // information about using the MD5 hash, see Hashes and ETags: Best + // Practices. 
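+//
+// For illustration, a client could compute the same value for local data
+// with the standard library (a sketch, not part of the generated API;
+// data is assumed to be a []byte):
+//
+//    sum := md5.Sum(data)
+//    encoded := base64.StdEncoding.EncodeToString(sum[:])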
+ Md5Hash string `json:"md5Hash,omitempty"` + + // MediaLink: Media download link. + MediaLink string `json:"mediaLink,omitempty"` + + // Metadata: User-provided metadata, in key/value pairs. + Metadata map[string]string `json:"metadata,omitempty"` + + // Metageneration: The version of the metadata for this object at this + // generation. Used for preconditions and for detecting changes in + // metadata. A metageneration number is only meaningful in the context + // of a particular generation of a particular object. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of this object. Required if not specified by URL + // parameter. + Name string `json:"name,omitempty"` + + // Owner: The owner of the object. This will always be the uploader of + // the object. + Owner *ObjectOwner `json:"owner,omitempty"` + + // SelfLink: The link to this object. + SelfLink string `json:"selfLink,omitempty"` + + // Size: Content-Length of the data in bytes. + Size uint64 `json:"size,omitempty,string"` + + // StorageClass: Storage class of the object. + StorageClass string `json:"storageClass,omitempty"` + + // TimeDeleted: The deletion time of the object in RFC 3339 format. Will + // be returned if and only if this version of the object has been + // deleted. + TimeDeleted string `json:"timeDeleted,omitempty"` + + // Updated: The creation or modification time of the object in RFC 3339 + // format. For buckets with versioning enabled, changing an object's + // metadata does not change this property. + Updated string `json:"updated,omitempty"` +} + +// ObjectOwner: The owner of the object. This will always be the +// uploader of the object. +type ObjectOwner struct { + // Entity: The entity, in the form user-userId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` +} + +// ObjectAccessControl: An access-control entry. +type ObjectAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of the object. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For object access control entries, + // this is always storage#objectAccessControl. + Kind string `json:"kind,omitempty"` + + // Object: The name of the object. 
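+//
+// For illustration (object name and user hypothetical), a per-object
+// grant could be expressed as:
+//
+//    &ObjectAccessControl{
+//        Object: "photos/cat.jpg",
+//        Entity: "user-liz@example.com",
+//        Role:   "READER",
+//    }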
+ Object string `json:"object,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. Can be READER or OWNER. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` +} + +// ObjectAccessControlProjectTeam: The project team associated with the +// entity, if any. +type ObjectAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. Can be owners, editors, or viewers. + Team string `json:"team,omitempty"` +} + +// ObjectAccessControls: An access-control list. +type ObjectAccessControls struct { + // Items: The list of items. + Items []interface{} `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of object access control + // entries, this is always storage#objectAccessControls. + Kind string `json:"kind,omitempty"` +} + +// Objects: A list of objects. +type Objects struct { + // Items: The list of items. + Items []*Object `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of objects, this is always + // storage#objects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Prefixes: The list of prefixes of objects matching-but-not-listed up + // to and including the requested delimiter. + Prefixes []string `json:"prefixes,omitempty"` +} + +// RewriteResponse: A rewrite response. +type RewriteResponse struct { + // Done: true if the copy is finished; otherwise, false if the copy is + // in progress. This property is always present in the response. + Done bool `json:"done,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // ObjectSize: The total size of the object being copied in bytes. This + // property is always present in the response. + ObjectSize uint64 `json:"objectSize,omitempty,string"` + + // Resource: A resource containing the metadata for the copied-to + // object. This property is present in the response only when copying + // completes. + Resource *Object `json:"resource,omitempty"` + + // RewriteToken: A token to use in subsequent requests to continue + // copying data. This token is present in the response only when there + // is more data to copy. + RewriteToken string `json:"rewriteToken,omitempty"` + + // TotalBytesRewritten: The total bytes written so far, which can be + // used to provide a waiting user with a progress indicator. This + // property is always present in the response. + TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"` +} + +// method id "storage.bucketAccessControls.delete": + +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + opt_ map[string]interface{} +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. +func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. 
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketAccessControlsDeleteCall) Do() error { + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.bucketAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.get": + +type BucketAccessControlsGetCall struct { + s *Service + bucket string + entity string + opt_ map[string]interface{} +} + +// Get: Returns the ACL entry for the specified entity on the specified +// bucket. +func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { + c := &BucketAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketAccessControlsGetCall) Do() (*BucketAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *BucketAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.insert": + +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + opt_ map[string]interface{} +} + +// Insert: Creates a new ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketAccessControlsInsertCall) Do() (*BucketAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *BucketAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.bucketAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.list": + +type BucketAccessControlsListCall struct { + s *Service + bucket string + opt_ map[string]interface{} +} + +// List: Retrieves ACL entries on the specified bucket. +func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { + c := &BucketAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketAccessControlsListCall) Do() (*BucketAccessControls, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *BucketAccessControls + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "response": { + // "$ref": "BucketAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.patch": + +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + opt_ map[string]interface{} +} + +// Patch: Updates an ACL entry on the specified bucket. This method +// supports patch semantics. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketAccessControlsPatchCall) Do() (*BucketAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *BucketAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.update": + +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + opt_ map[string]interface{} +} + +// Update: Updates an ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketAccessControlsUpdateCall) Do() (*BucketAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *BucketAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.delete": + +type BucketsDeleteCall struct { + s *Service + bucket string + opt_ map[string]interface{} +} + +// Delete: Permanently deletes an empty bucket. +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. +func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketsDeleteCall) Do() error { + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.get": + +type BucketsGetCall struct { + s *Service + bucket string + opt_ map[string]interface{} +} + +// Get: Returns metadata for the specified bucket. +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
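+//
+// For illustration (bucket name hypothetical; svc is a *Service from
+// New), fetching only a few bucket properties:
+//
+//    b, err := svc.Buckets.Get("my-bucket").
+//        Fields("name", "location", "storageClass").
+//        Do()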
+func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketsGetCall) Do() (*Bucket, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Bucket + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns metadata for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.get", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.insert": + +type BucketsInsertCall struct { + s *Service + projectid string + bucket *Bucket + opt_ map[string]interface{} +} + +// Insert: Creates a new bucket. +func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c.projectid = projectid + c.bucket = bucket + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. 
+// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.opt_["predefinedAcl"] = predefinedAcl + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + params.Set("project", fmt.Sprintf("%v", c.projectid)) + if v, ok := c.opt_["predefinedAcl"]; ok { + params.Set("predefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { + params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketsInsertCall) Do() (*Bucket, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Bucket + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.list": + +type BucketsListCall struct { + s *Service + projectid string + opt_ map[string]interface{} +} + +// List: Retrieves a list of buckets for a given project. 
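+//
+// An illustrative pagination sketch using NextPageToken (project ID
+// hypothetical; svc is a *Service from New):
+//
+//    call := svc.Buckets.List("my-project")
+//    for {
+//        bs, err := call.Do()
+//        if err != nil {
+//            break // handle the error
+//        }
+//        for _, b := range bs.Items {
+//            fmt.Println(b.Name)
+//        }
+//        if bs.NextPageToken == "" {
+//            break
+//        }
+//        call.PageToken(bs.NextPageToken)
+//    }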
+func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, opt_: make(map[string]interface{})} + c.projectid = projectid + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.opt_["maxResults"] = maxResults + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.opt_["pageToken"] = pageToken + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.opt_["prefix"] = prefix + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsListCall) Projection(projection string) *BucketsListCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + params.Set("project", fmt.Sprintf("%v", c.projectid)) + if v, ok := c.opt_["maxResults"]; ok { + params.Set("maxResults", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["pageToken"]; ok { + params.Set("pageToken", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["prefix"]; ok { + params.Set("prefix", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketsListCall) Do() (*Buckets, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Buckets + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of buckets for a given project.", + // "httpMethod": "GET", + // "id": "storage.buckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "maxResults": { + // "description": "Maximum number of buckets to return.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to buckets whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "response": { + // "$ref": "Buckets" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.patch": + +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + opt_ map[string]interface{} +} + +// Patch: Updates a bucket. This method supports patch semantics. +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. 
+// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.opt_["predefinedAcl"] = predefinedAcl + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedAcl"]; ok { + params.Set("predefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { + params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketsPatchCall) Do() (*Bucket, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Bucket + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.update": + +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + opt_ map[string]interface{} +} + +// Update: Updates a bucket. +func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.opt_["predefinedAcl"] = predefinedAcl + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. 
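+//
+// For example, a sketch of an update whose response omits ACL data
+// (svc, "my-bucket", and bucketResource are assumed placeholders):
+//
+//	updated, err := svc.Buckets.Update("my-bucket", bucketResource).
+//		Projection("noAcl").Do()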
+func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedAcl"]; ok { + params.Set("predefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { + params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *BucketsUpdateCall) Do() (*Bucket, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Bucket + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + opt_ map[string]interface{} +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, opt_: make(map[string]interface{})} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ChannelsStopCall) Do() error { + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.delete": + +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + opt_ map[string]interface{} +} + +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *DefaultObjectAccessControlsDeleteCall) Do() error { + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.defaultObjectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.get": + +type DefaultObjectAccessControlsGetCall struct { + s *Service + bucket string + entity string + opt_ map[string]interface{} +} + +// Get: Returns the default object ACL entry for the specified entity on +// the specified bucket. +func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { + c := &DefaultObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *DefaultObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.insert": + +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + opt_ map[string]interface{} +} + +// Insert: Creates a new default object ACL entry on the specified +// bucket. 
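+//
+// A short sketch, assuming svc is an authorized *Service; the bucket
+// name and the entity/role values are placeholders:
+//
+//	acl := &ObjectAccessControl{Entity: "allUsers", Role: "READER"}
+//	res, err := svc.DefaultObjectAccessControls.Insert("my-bucket", acl).Do()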
+func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *DefaultObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.list": + +type DefaultObjectAccessControlsListCall struct { + s *Service + bucket string + opt_ map[string]interface{} +} + +// List: Retrieves default object ACL entries on the specified bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. 
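+//
+// For example, a sketch of a conditional listing (the metageneration
+// value 3 is purely illustrative):
+//
+//	acls, err := svc.DefaultObjectAccessControls.List("my-bucket").
+//		IfMetagenerationMatch(3).Do()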
+func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *DefaultObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControls + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves default object ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.patch": + +type DefaultObjectAccessControlsPatchCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + opt_ map[string]interface{} +} + +// Patch: Updates a default 
object ACL entry on the specified bucket. +// This method supports patch semantics. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *DefaultObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.update": + +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + opt_ map[string]interface{} +} + +// Update: Updates a default object ACL entry on the specified bucket. 
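+//
+// A sketch, with placeholder bucket and entity values:
+//
+//	acl := &ObjectAccessControl{Role: "OWNER"}
+//	res, err := svc.DefaultObjectAccessControls.
+//		Update("my-bucket", "user-alice@example.com", acl).Do()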
+func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *DefaultObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.delete": + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + opt_ map[string]interface{} +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. 
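+//
+// For example (all identifiers are placeholders; note that Do returns
+// only an error for this call):
+//
+//	err := svc.ObjectAccessControls.
+//		Delete("my-bucket", "path/to/object", "allUsers").Do()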
+func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.opt_["generation"] = generation + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectAccessControlsDeleteCall) Do() error { + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.get": + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + opt_ map[string]interface{} +} + +// Get: Returns the ACL entry for the specified entity on the specified +// object. 
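+//
+// For example (placeholder names):
+//
+//	acl, err := svc.ObjectAccessControls.
+//		Get("my-bucket", "path/to/object", "user-alice@example.com").Do()
+//	if err == nil {
+//		fmt.Println(acl.Role)
+//	}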
+func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { + c := &ObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { + c.opt_["generation"] = generation + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + opt_ map[string]interface{} +} + +// Insert: Creates a new ACL entry on the specified object. +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.opt_["generation"] = generation + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + opt_ map[string]interface{} +} + +// List: Retrieves ACL entries on the specified object. +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.opt_["generation"] = generation + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" 
+ params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControls + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + opt_ map[string]interface{} +} + +// Patch: Updates an ACL entry on the specified object. This method +// supports patch semantics. +func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.opt_["generation"] = generation + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
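+//
+// For example, a sketch requesting only the entity and role fields of
+// the patched ACL entry (the field selectors are assumed to follow the
+// ObjectAccessControl JSON tags):
+//
+//	res, err := svc.ObjectAccessControls.
+//		Patch("my-bucket", "path/to/object", "allUsers", acl).
+//		Fields("entity", "role").Do()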
+func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + opt_ map[string]interface{} +} + +// Update: Updates an ACL entry on the specified object. 
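+
+// Editor's sketch, not upstream code: Patch (above) sends a partial
+// ObjectAccessControl via PATCH, while Update (below) replaces the entry
+// via PUT. With svc as in the previous sketch, and ObjectAccessControl
+// field names assumed from the schema earlier in this file:
+//
+//   acl := &storage.ObjectAccessControl{Role: "READER"}
+//   updated, err := svc.ObjectAccessControls.Patch("my-bucket", "notes/todo.txt", "allUsers", acl).Do()
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   _ = updated
+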
+func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.opt_["generation"] = generation + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *ObjectAccessControl + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + opt_ map[string]interface{} +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, opt_: make(map[string]interface{})} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
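+
+// Editor's sketch, not upstream code: concatenating objects with the
+// Compose call above. Source and destination live in the same bucket;
+// the ComposeRequest fields are assumed from the schema defined earlier
+// in this file and are left for the caller to fill in:
+//
+//   req := &storage.ComposeRequest{
+//       // set Destination and SourceObjects per the ComposeRequest schema
+//   }
+//   obj, err := svc.Objects.Compose("my-bucket", "combined.bin", req).Do()
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   _ = obj
+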
+func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["destinationPredefinedAcl"]; ok { + params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" + params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsComposeCall) Download() (*http.Response, error) { + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +func (c *ObjectsComposeCall) Do() (*Object, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Object + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + opt_ map[string]interface{} +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, opt_: make(map[string]interface{})} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. 
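+
+// Editor's sketch, not upstream code: the Copy call above combined with
+// the DestinationPredefinedAcl setter just documented and one of the If*
+// precondition setters defined below. Each setter only records a query
+// parameter and returns the call, so they chain fluently:
+//
+//   copied, err := svc.Objects.Copy("src-bucket", "a.txt", "dst-bucket", "b.txt", &storage.Object{}).
+//       DestinationPredefinedAcl("projectPrivate").
+//       IfSourceGenerationMatch(1234567890).
+//       Do()
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   _ = copied
+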
+func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's generation matches the given value. +func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.opt_["ifSourceGenerationMatch"] = ifSourceGenerationMatch + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's generation does not match the given +// value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.opt_["ifSourceGenerationNotMatch"] = ifSourceGenerationNotMatch + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.opt_["ifSourceMetagenerationMatch"] = ifSourceMetagenerationMatch + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.opt_["ifSourceMetagenerationNotMatch"] = ifSourceMetagenerationNotMatch + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. 
+// "noAcl" - Omit the acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.opt_["projection"] = projection + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.opt_["sourceGeneration"] = sourceGeneration + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["destinationPredefinedAcl"]; ok { + params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationNotMatch"]; ok { + params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceGenerationMatch"]; ok { + params.Set("ifSourceGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceGenerationNotMatch"]; ok { + params.Set("ifSourceGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceMetagenerationMatch"]; ok { + params.Set("ifSourceMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceMetagenerationNotMatch"]; ok { + params.Set("ifSourceMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["sourceGeneration"]; ok { + params.Set("sourceGeneration", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. 
+func (c *ObjectsCopyCall) Download() (*http.Response, error) { + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +func (c *ObjectsCopyCall) Do() (*Object, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Object + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + opt_ map[string]interface{} +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.opt_["generation"] = generation + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
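+
+// Editor's sketch, not upstream code: deleting one revision of a
+// versioned object with the Delete call above. Do returns only an error
+// here, since the DELETE response carries no body to decode:
+//
+//   err := svc.Objects.Delete("my-bucket", "notes/todo.txt").
+//       Generation(1234567890).
+//       Do()
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+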
+func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationNotMatch"]; ok { + params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectsDeleteCall) Do() error { + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + opt_ map[string]interface{} +} + +// Get: Retrieves an object or its metadata. +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.opt_["generation"] = generation + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's generation +// matches the given value. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's generation does not match the given value. +func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { + c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
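+
+// Editor's sketch, not upstream code: for the Get call above, Do (below)
+// decodes the object's JSON metadata, while Download (below) requests
+// alt=media and hands back the raw *http.Response, whose Body the caller
+// must close:
+//
+//   res, err := svc.Objects.Get("my-bucket", "notes/todo.txt").Download()
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   defer res.Body.Close()
+//   data, err := ioutil.ReadAll(res.Body)
+//   // ... use data ...
+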
+func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationNotMatch"]; ok { + params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsGetCall) Download() (*http.Response, error) { + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +func (c *ObjectsGetCall) Do() (*Object, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Object + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // 
"description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + opt_ map[string]interface{} + media_ io.Reader + resumable_ googleapi.SizeReaderAt + mediaType_ string + ctx_ context.Context + protocol_ string +} + +// Insert: Stores a new object and metadata. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.opt_["contentEncoding"] = contentEncoding + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. 
+func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// Name sets the optional parameter "name": Name of the object. Required +// when the object metadata is not otherwise provided. Overrides the +// object metadata's name value, if any. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts. +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.opt_["name"] = name + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.opt_["predefinedAcl"] = predefinedAcl + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.opt_["projection"] = projection + return c +} + +// Media specifies the media to upload in a single chunk. +// At most one of Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader) *ObjectsInsertCall { + c.media_ = r + c.protocol_ = "multipart" + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx. +// At most one of Media and ResumableMedia may be set. +// mediaType identifies the MIME media type of the upload, such as "image/png". +// If mediaType is "", it will be auto-detected. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + c.resumable_ = io.NewSectionReader(r, 0, size) + c.mediaType_ = mediaType + c.protocol_ = "resumable" + return c +} + +// ProgressUpdater provides a callback function that will be called after every chunk. +// It should be a low-latency function in order to not slow down the upload operation. +// This should only be called when using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.opt_["progressUpdater"] = pu + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
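+
+// Editor's sketch, not upstream code: a single-chunk upload through the
+// Insert call above. Media switches the request onto the upload endpoint
+// with uploadType=multipart; ResumableMedia would be used instead for
+// large or interruptible uploads. Object field names are assumed from the
+// schema earlier in this file:
+//
+//   f, err := os.Open("todo.txt") // hypothetical local file
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   defer f.Close()
+//   obj, err := svc.Objects.Insert("my-bucket", &storage.Object{Name: "notes/todo.txt"}).
+//       Media(f).
+//       Do()
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   _ = obj
+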
+func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["contentEncoding"]; ok { + params.Set("contentEncoding", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationNotMatch"]; ok { + params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["name"]; ok { + params.Set("name", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedAcl"]; ok { + params.Set("predefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.media_ != nil || c.resumable_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + params.Set("uploadType", c.protocol_) + } + urls += "?" + params.Encode() + if c.protocol_ != "resumable" { + var cancel func() + cancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype) + if cancel != nil { + defer cancel() + } + } + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + if c.protocol_ == "resumable" { + if c.mediaType_ == "" { + c.mediaType_ = googleapi.DetectMediaType(c.resumable_) + } + req.Header.Set("X-Upload-Content-Type", c.mediaType_) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + } else { + req.Header.Set("Content-Type", ctype) + } + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectsInsertCall) Do() (*Object, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var progressUpdater_ googleapi.ProgressUpdater + if v, ok := c.opt_["progressUpdater"]; ok { + if pu, ok := v.(googleapi.ProgressUpdater); ok { + progressUpdater_ = pu + } + } + if c.protocol_ == "resumable" { + loc := res.Header.Get("Location") + rx := &googleapi.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.resumable_, + MediaType: c.mediaType_, + ContentLength: c.resumable_.Size(), + Callback: progressUpdater_, + } + res, err = rx.Upload(c.ctx_) + if err != nil { + return nil, err + } + defer res.Body.Close() + } + var ret *Object + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": 
"/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "supportsMediaUpload": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + opt_ map[string]interface{} +} + +// List: Retrieves a list of objects matching the criteria. +func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.opt_["delimiter"] = delimiter + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return. As duplicate prefixes are omitted, +// fewer total results may be returned than requested. The default value +// of this parameter is 1,000 items. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.opt_["maxResults"] = maxResults + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.opt_["pageToken"] = pageToken + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { + c.opt_["prefix"] = prefix + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { + c.opt_["projection"] = projection + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { + c.opt_["versions"] = versions + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
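+
+// Editor's sketch, not upstream code: paging through a bucket with the
+// List call above. The Objects result carries a NextPageToken field
+// (assumed from the schema earlier in this file) that feeds the next
+// page's PageToken:
+//
+//   token := ""
+//   for {
+//       objs, err := svc.Objects.List("my-bucket").Prefix("notes/").PageToken(token).Do()
+//       if err != nil {
+//           log.Fatal(err)
+//       }
+//       // ... use objs.Items ...
+//       if objs.NextPageToken == "" {
+//           break
+//       }
+//       token = objs.NextPageToken
+//   }
+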
+func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["delimiter"]; ok { + params.Set("delimiter", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["maxResults"]; ok { + params.Set("maxResults", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["pageToken"]; ok { + params.Set("pageToken", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["prefix"]; ok { + params.Set("prefix", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["versions"]; ok { + params.Set("versions", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls += "?" + params.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectsListCall) Do() (*Objects, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Objects + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + opt_ map[string]interface{} +} + +// Patch: Updates an object's metadata. This method supports patch +// semantics. +func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.opt_["generation"] = generation + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. 
+func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.opt_["predefinedAcl"] = predefinedAcl + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationNotMatch"]; ok { + params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedAcl"]; ok { + params.Set("predefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectsPatchCall) Do() (*Object, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Object + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + opt_ map[string]interface{} +} + +// Rewrite: Rewrites a source object to a destination object. 
Optionally +// overrides metadata. +func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, opt_: make(map[string]interface{})} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.opt_["ifSourceGenerationMatch"] = ifSourceGenerationMatch + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's generation does not match the given +// value. 
+func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.opt_["ifSourceGenerationNotMatch"] = ifSourceGenerationNotMatch + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.opt_["ifSourceMetagenerationMatch"] = ifSourceMetagenerationMatch + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.opt_["ifSourceMetagenerationNotMatch"] = ifSourceMetagenerationNotMatch + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. If +// specified the value must be an integral multiple of 1 MiB (1048576). +// Also, this only applies to requests where the source and destination +// span locations and/or storage classes. Finally, this value must not +// change across rewrite calls else you'll get an error that the +// rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.opt_["maxBytesRewrittenPerCall"] = maxBytesRewrittenPerCall + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.opt_["projection"] = projection + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.opt_["rewriteToken"] = rewriteToken + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.opt_["sourceGeneration"] = sourceGeneration + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["destinationPredefinedAcl"]; ok { + params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationNotMatch"]; ok { + params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceGenerationMatch"]; ok { + params.Set("ifSourceGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceGenerationNotMatch"]; ok { + params.Set("ifSourceGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceMetagenerationMatch"]; ok { + params.Set("ifSourceMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifSourceMetagenerationNotMatch"]; ok { + params.Set("ifSourceMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["maxBytesRewrittenPerCall"]; ok { + params.Set("maxBytesRewrittenPerCall", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["rewriteToken"]; ok { + params.Set("rewriteToken", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["sourceGeneration"]; ok { + params.Set("sourceGeneration", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectsRewriteCall) Do() (*RewriteResponse, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *RewriteResponse + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. 
Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "RewriteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.update": + +type ObjectsUpdateCall struct { + s *Service + bucket string + object string + object2 *Object + opt_ map[string]interface{} +} + +// Update: Updates an object's metadata. +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { + c.opt_["generation"] = generation + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. 
+func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.opt_["ifGenerationMatch"] = ifGenerationMatch + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.opt_["predefinedAcl"] = predefinedAcl + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.opt_["projection"] = projection + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["generation"]; ok { + params.Set("generation", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationMatch"]; ok { + params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifGenerationNotMatch"]; ok { + params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationMatch"]; ok { + params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { + params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["predefinedAcl"]; ok { + params.Set("predefinedAcl", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + params.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. 
+func (c *ObjectsUpdateCall) Download() (*http.Response, error) { + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +func (c *ObjectsUpdateCall) Do() (*Object, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Object + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata.", + // "httpMethod": "PUT", + // "id": "storage.objects.update", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true + // } + +} + +// method id "storage.objects.watchAll": + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + opt_ map[string]interface{} +} + +// WatchAll: Watch for changes on all objects in a bucket. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, opt_: make(map[string]interface{})} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.opt_["delimiter"] = delimiter + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return. As duplicate prefixes are omitted, +// fewer total results may be returned than requested. The default value +// of this parameter is 1,000 items. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.opt_["maxResults"] = maxResults + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.opt_["pageToken"] = pageToken + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.opt_["prefix"] = prefix + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.opt_["projection"] = projection + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.opt_["versions"] = versions + return c +} + +// Fields allows partial responses to be retrieved. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { + c.opt_["fields"] = googleapi.CombineFields(s) + return c +} + +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + params := make(url.Values) + params.Set("alt", alt) + if v, ok := c.opt_["delimiter"]; ok { + params.Set("delimiter", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["maxResults"]; ok { + params.Set("maxResults", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["pageToken"]; ok { + params.Set("pageToken", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["prefix"]; ok { + params.Set("prefix", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["projection"]; ok { + params.Set("projection", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["versions"]; ok { + params.Set("versions", fmt.Sprintf("%v", v)) + } + if v, ok := c.opt_["fields"]; ok { + params.Set("fields", fmt.Sprintf("%v", v)) + } + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls += "?" + params.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + return c.s.client.Do(req) +} + +func (c *ObjectsWatchAllCall) Do() (*Channel, error) { + res, err := c.doRequest("json") + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + var ret *Channel + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." 
+	//   ],
+	//   "location": "query",
+	//   "type": "string"
+	//  },
+	//  "versions": {
+	//   "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+	//   "location": "query",
+	//   "type": "boolean"
+	//  }
+	// },
+	// "path": "b/{bucket}/o/watch",
+	// "request": {
+	//  "$ref": "Channel",
+	//  "parameterName": "resource"
+	// },
+	// "response": {
+	//  "$ref": "Channel"
+	// },
+	// "scopes": [
+	//  "https://www.googleapis.com/auth/cloud-platform",
+	//  "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//  "https://www.googleapis.com/auth/devstorage.full_control",
+	//  "https://www.googleapis.com/auth/devstorage.read_only",
+	//  "https://www.googleapis.com/auth/devstorage.read_write"
+	// ],
+	// "supportsSubscription": true
+	// }
+
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/.travis.yml b/Godeps/_workspace/src/google.golang.org/cloud/.travis.yml
new file mode 100644
index 000000000..86e342326
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/.travis.yml
@@ -0,0 +1,11 @@
+sudo: false
+language: go
+go:
+- 1.4
+- tip
+install:
+- go get -v google.golang.org/cloud/...
+script:
+- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
+- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
+  go test -v -tags=integration google.golang.org/cloud/...
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/AUTHORS b/Godeps/_workspace/src/google.golang.org/cloud/AUTHORS
new file mode 100644
index 000000000..3da443dc9
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/AUTHORS
@@ -0,0 +1,12 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+Google Inc.
+Palm Stone Games, Inc.
+Péter Szilágyi
+Tyler Treat
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md b/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md
new file mode 100644
index 000000000..9a1cab287
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+1. Get the cloud package by running `go get -d google.golang.org/cloud`.
+   1. If you have already checked out the source, make sure that the remote git
+      origin is https://code.googlesource.com/gocloud:
+
+          git remote set-url origin https://code.googlesource.com/gocloud
+1. Make changes and create a change by running `git codereview change <name>`,
+provide a commit message, and use `git codereview mail` to create a Gerrit CL.
+1. Keep amending the change and mail it as you receive feedback.
+
+## Integration Tests
+
+In addition to the unit tests, you may run the integration test suite.
+
+To run the integration tests, you must create and configure a project in the
+Google Developers Console. Once you have created a project, set the
+following environment variables to run against the actual APIs.
+
+- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
+- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
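+
+For example (an illustrative sketch; the project ID and key path below are
+placeholders, not real values), the variables can be exported in your shell
+before running the tests:
+
+``` sh
+# Placeholder values: substitute your own project ID and key file path.
+$ export GCLOUD_TESTS_GOLANG_PROJECT_ID="your-project-id"
+$ export GCLOUD_TESTS_GOLANG_KEY="$HOME/path/to/key.json"
+```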
+
+Create a storage bucket with the same name as the project ID set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.
+The storage integration test will create and delete some objects in this bucket.
+
+Install the [gcloud command-line tool][gcloudcli] on your machine and use it
+to create the indexes used in the datastore integration tests, as defined in
+`datastore/testdata/index.yaml`:
+
+From the project's root directory:
+
+``` sh
+# Install the app component
+$ gcloud components update app
+
+# Set the default project in your env
+$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Authenticate the gcloud tool with your account
+$ gcloud auth login
+
+# Create the indexes
+$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
+
+```
+
+You can then run the integration tests with:
+
+``` sh
+$ go test -v -tags=integration google.golang.org/cloud/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests, you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+ +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS b/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS new file mode 100644 index 000000000..475ac6a66 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS @@ -0,0 +1,24 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Symonds +Glenn Lewis +Johan Euphrosine +Luna Duclos +Michael McGreevy +Péter Szilágyi +Tyler Treat diff --git a/Godeps/_workspace/src/google.golang.org/cloud/LICENSE b/Godeps/_workspace/src/google.golang.org/cloud/LICENSE new file mode 100644 index 000000000..a4c5efd82 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/google.golang.org/cloud/README.md b/Godeps/_workspace/src/google.golang.org/cloud/README.md new file mode 100644 index 000000000..10d3995d5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/README.md @@ -0,0 +1,135 @@ +# Google Cloud for Go + +[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang) + +**NOTE:** These packages are experimental, and may occasionally make +backwards-incompatible changes. + +**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). + +Go packages for Google Cloud Platform services. 
Supported APIs include:
+
+ * Google Cloud Datastore
+ * Google Cloud Storage
+ * Google Cloud Pub/Sub
+ * Google Cloud Container Engine
+
+``` go
+import "google.golang.org/cloud"
+```
+
+Documentation and examples are available at
+[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud).
+
+## Authorization
+
+Authorization throughout the package is delegated to the golang.org/x/oauth2 package.
+Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2)
+for examples on using oauth2 with the Cloud package.
+
+## Google Cloud Datastore
+
+[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully
+managed, schemaless database for storing non-relational data. Cloud Datastore
+automatically scales with your users and supports ACID transactions, high availability
+of reads and writes, strong consistency for reads and ancestor queries, and eventual
+consistency for all other queries.
+
+Follow the [activation instructions][cloud-datastore-activation] to use the Google
+Cloud Datastore API with your project.
+
+[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore)
+
+
+```go
+type Post struct {
+	Title       string
+	Body        string `datastore:",noindex"`
+	PublishedAt time.Time
+}
+keys := []*datastore.Key{
+	datastore.NewKey(ctx, "Post", "post1", 0, nil),
+	datastore.NewKey(ctx, "Post", "post2", 0, nil),
+}
+posts := []*Post{
+	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := datastore.PutMulti(ctx, keys, posts); err != nil {
+	log.Println(err)
+}
+```
+
+## Google Cloud Storage
+
+[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
+data on Google infrastructure with very high reliability, performance and availability,
+and can be used to distribute large data objects to users via direct download.
+
+[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage)
+
+
+```go
+// Read the object1 from bucket.
+rc, err := storage.NewReader(ctx, "bucket", "object1")
+if err != nil {
+	log.Fatal(err)
+}
+slurp, err := ioutil.ReadAll(rc)
+rc.Close()
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+## Google Cloud Pub/Sub (Alpha)
+
+> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in
+> backward-incompatible ways and is not recommended for production use. It is not
+> subject to any SLA or deprecation policy.
+
+[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
+your services with reliable, many-to-many, asynchronous messaging hosted on Google's
+infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
+for building your own robust, global services.
+
+[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub)
+
+
+```go
+// Publish "hello world" on topic1.
+msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
+	Data: []byte("hello world"),
+})
+if err != nil {
+	log.Println(err)
+}
+// Pull messages via subscription1.
+msgs, err := pubsub.Pull(ctx, "subscription1", 1)
+if err != nil {
+	log.Println(err)
+}
+```
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
+document for details. We're using Gerrit for our code reviews.
Please don't open pull
requests against this repo; new pull requests will be automatically closed.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go
new file mode 100644
index 000000000..ede48d1df
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go
@@ -0,0 +1,148 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+// TODO(mcgreevy): support dry-run mode when creating jobs.
+
+import (
+	"fmt"
+	"net/http"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// A Source is a source of data for the Copy function.
+type Source interface {
+	implementsSource()
+}
+
+// A Destination is a destination of data for the Copy function.
+type Destination interface {
+	implementsDestination()
+}
+
+// An Option is an optional argument to Copy.
+type Option interface {
+	implementsOption()
+}
+
+// A ReadSource is a source of data for the Read function.
+type ReadSource interface {
+	implementsReadSource()
+}
+
+// A ReadOption is an optional argument to Read.
+type ReadOption interface {
+	customizeRead(conf *pagingConf)
+}
+
+const Scope = "https://www.googleapis.com/auth/bigquery"
+
+// Client may be used to perform BigQuery operations.
+type Client struct {
+	service   service
+	projectID string
+}
+
+// Note: many of the methods on *Client appear in the various *_op.go source files.
+
+// NewClient constructs a new Client which can perform BigQuery operations.
+// Operations performed via the client are billed to the specified GCP project.
+// The supplied http.Client is used for making requests to the BigQuery server and must be capable of
+// authenticating requests with Scope.
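+//
+// A minimal construction sketch (assumes the caller wires up OAuth2 via
+// golang.org/x/oauth2 and golang.org/x/oauth2/google; error handling elided):
+//
+//	ctx := context.Background()
+//	ts, err := google.DefaultTokenSource(ctx, bigquery.Scope)
+//	// handle err
+//	client, err := bigquery.NewClient(oauth2.NewClient(ctx, ts), "my-project-id")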
+func NewClient(client *http.Client, projectID string) (*Client, error) { + bqService, err := newBigqueryService(client) + if err != nil { + return nil, fmt.Errorf("constructing bigquery client: %v", err) + } + + c := &Client{ + service: bqService, + projectID: projectID, + } + return c, nil +} + +// initJobProto creates and returns a bigquery Job proto. +// The proto is customized using any jobOptions in options. +// The list of Options is returned with the jobOptions removed. +func initJobProto(projectID string, options []Option) (*bq.Job, []Option) { + job := &bq.Job{} + + var other []Option + for _, opt := range options { + if o, ok := opt.(jobOption); ok { + o.customizeJob(job, projectID) + } else { + other = append(other, opt) + } + } + return job, other +} + +// Copy starts a BigQuery operation to copy data from a Source to a Destination. +func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) { + switch dst := dst.(type) { + case *Table: + switch src := src.(type) { + case *GCSReference: + return c.load(ctx, dst, src, options) + case *Table: + return c.cp(ctx, dst, Tables{src}, options) + case Tables: + return c.cp(ctx, dst, src, options) + case *Query: + return c.query(ctx, dst, src, options) + } + case *GCSReference: + if src, ok := src.(*Table); ok { + return c.extract(ctx, dst, src, options) + } + } + return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src) +} + +// Read fetches data from a ReadSource and returns the data via an Iterator. +func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) { + // TODO(mcgreevy): use ctx. + switch src := src.(type) { + case *Job: + return c.readQueryResults(src, options) + case *Query: + return c.executeQuery(ctx, src, options...) + case *Table: + return c.readTable(src, options) + } + return nil, fmt.Errorf("src (%T) does not support the Read operation", src) +} + +// executeQuery submits a query for execution and returns the results via an Iterator. +func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) { + dest := &Table{} + job, err := c.Copy(ctx, dest, q, WriteTruncate) + if err != nil { + return nil, err + } + + return c.Read(ctx, job, options...) +} + +func (c *Client) Dataset(id string) *Dataset { + return &Dataset{ + id: id, + client: c, + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go new file mode 100644 index 000000000..ec0e45e39 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go @@ -0,0 +1,47 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
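+
+// Table-to-table copies go through Client.Copy with a *Table destination and
+// a *Table or Tables source; cp below builds the corresponding job proto.
+// An illustrative call (names are placeholders):
+//
+//	job, err := client.Copy(ctx, dstTable, srcTable, WriteTruncate)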
+ +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type copyOption interface { + customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) +} + +func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationTableCopy{} + + dst.customizeCopyDst(payload, c.projectID) + src.customizeCopySrc(payload, c.projectID) + + for _, opt := range options { + o, ok := opt.(copyOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeCopy(payload, c.projectID) + } + + job.Configuration = &bq.JobConfiguration{ + Copy: payload, + } + return c.service.insertJob(ctx, job, c.projectID) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go new file mode 100644 index 000000000..26e40ec3b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go @@ -0,0 +1,104 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +func defaultCopyJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Copy: &bq.JobConfigurationTableCopy{ + DestinationTable: &bq.TableReference{ + ProjectId: "d-project-id", + DatasetId: "d-dataset-id", + TableId: "d-table-id", + }, + SourceTables: []*bq.TableReference{ + { + ProjectId: "s-project-id", + DatasetId: "s-dataset-id", + TableId: "s-table-id", + }, + }, + }, + }, + } +} + +func TestCopy(t *testing.T) { + testCases := []struct { + dst *Table + src Tables + options []Option + want *bq.Job + }{ + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + src: Tables{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + want: defaultCopyJob(), + }, + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + src: Tables{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + options: []Option{CreateNever, WriteTruncate}, + want: func() *bq.Job { + j := defaultCopyJob() + j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" + j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { + t.Errorf("err calling cp: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go new file mode 100644 index 000000000..3f85935a2 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go @@ -0,0 +1,41 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import "golang.org/x/net/context" + +// Dataset is a reference to a BigQuery dataset. +type Dataset struct { + id string + client *Client +} + +// ListTables returns a list of all the tables contained in the Dataset. +func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) { + var tables []*Table + + err := getPages("", func(pageToken string) (string, error) { + ts, tok, err := d.client.service.listTables(ctx, d.client.projectID, d.id, pageToken) + if err == nil { + tables = append(tables, ts...) 
+		}
+		return tok, err
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	return tables, nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go
new file mode 100644
index 000000000..79c9c4e04
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go
@@ -0,0 +1,105 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"reflect"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+// listTablesServiceStub services listTables requests by returning data from an in-memory list of values.
+type listTablesServiceStub struct {
+	expectedProject, expectedDataset string
+	values                           [][]*Table        // contains pages of tables.
+	pageTokens                       map[string]string // maps incoming page token to returned page token.
+
+	service
+}
+
+func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
+	if projectID != s.expectedProject {
+		return nil, "", errors.New("wrong project id")
+	}
+	if datasetID != s.expectedDataset {
+		return nil, "", errors.New("wrong dataset id")
+	}
+
+	tables := s.values[0]
+	s.values = s.values[1:]
+	return tables, s.pageTokens[pageToken], nil
+}
+
+func TestListTables(t *testing.T) {
+	t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
+	t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
+	t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
+	testCases := []struct {
+		data       [][]*Table
+		pageTokens map[string]string
+		want       []*Table
+	}{
+		{
+			data:       [][]*Table{{t1, t2}, {t3}},
+			pageTokens: map[string]string{"": "a", "a": ""},
+			want:       []*Table{t1, t2, t3},
+		},
+		{
+			data:       [][]*Table{{t1, t2}, {t3}},
+			pageTokens: map[string]string{"": ""}, // no more pages after first one.
+			want:       []*Table{t1, t2},
+		},
+	}
+
+	for _, tc := range testCases {
+		c := &Client{
+			service: &listTablesServiceStub{
+				expectedProject: "x",
+				expectedDataset: "y",
+				values:          tc.data,
+				pageTokens:      tc.pageTokens,
+			},
+			projectID: "x",
+		}
+		got, err := c.Dataset("y").ListTables(context.Background())
+		if err != nil {
+			t.Errorf("err calling ListTables: %v", err)
+			continue
+		}
+
+		if !reflect.DeepEqual(got, tc.want) {
+			t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
+		}
+	}
+}
+
+func TestListTablesError(t *testing.T) {
+	c := &Client{
+		service: &listTablesServiceStub{
+			expectedProject: "x",
+			expectedDataset: "y",
+		},
+		projectID: "x",
+	}
+	// Test that service read errors are propagated back to the caller.
+	// Passing "not y" as the dataset id will cause the service to return an error.
+	_, err := c.Dataset("not y").ListTables(context.Background())
+	if err == nil {
+		// ListTables should return the service error directly.
+ t.Errorf("ListTables expected: non-nil err, got: nil") + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go new file mode 100644 index 000000000..3c86bf78c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go @@ -0,0 +1,18 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bigquery provides a client for the BigQuery service. +// +// Note: This package is a work-in-progress. Backwards-incompatible changes should be expected. +package bigquery diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go new file mode 100644 index 000000000..b2e3e3f28 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go @@ -0,0 +1,42 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + bq "google.golang.org/api/bigquery/v2" +) + +// An Error contains detailed information about an error encountered while processing a job. +type Error struct { + // Mirrors bq.ErrorProto, but drops DebugInfo + Location, Message, Reason string +} + +func (e Error) Error() string { + return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) +} + +func errorFromErrorProto(ep *bq.ErrorProto) *Error { + if ep == nil { + return nil + } + return &Error{ + Location: ep.Location, + Message: ep.Message, + Reason: ep.Reason, + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go new file mode 100644 index 000000000..63a6428f1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go @@ -0,0 +1,59 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type extractOption interface { + customizeExtract(conf *bq.JobConfigurationExtract, projectID string) +} + +// DisableHeader returns an Option that disables the printing of a header row in exported data. +func DisableHeader() Option { return disableHeader{} } + +type disableHeader struct{} + +func (opt disableHeader) implementsOption() {} + +func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract, projectID string) { + f := false + conf.PrintHeader = &f +} + +func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationExtract{} + + dst.customizeExtractDst(payload, c.projectID) + src.customizeExtractSrc(payload, c.projectID) + + for _, opt := range options { + o, ok := opt.(extractOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeExtract(payload, c.projectID) + } + + job.Configuration = &bq.JobConfiguration{ + Extract: payload, + } + return c.service.insertJob(ctx, job, c.projectID) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go new file mode 100644 index 000000000..4fa647491 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go @@ -0,0 +1,97 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultExtractJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Extract: &bq.JobConfigurationExtract{ + SourceTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + DestinationUris: []string{"uri"}, + }, + }, + } +} + +func TestExtract(t *testing.T) { + testCases := []struct { + dst *GCSReference + src *Table + options []Option + want *bq.Job + }{ + { + dst: defaultGCS, + src: defaultTable, + want: defaultExtractJob(), + }, + { + dst: defaultGCS, + src: defaultTable, + options: []Option{ + DisableHeader(), + }, + want: func() *bq.Job { + j := defaultExtractJob() + f := false + j.Configuration.Extract.PrintHeader = &f + return j + }(), + }, + { + dst: &GCSReference{ + uris: []string{"uri"}, + Compression: Gzip, + DestinationFormat: JSON, + FieldDelimiter: "\t", + }, + src: defaultTable, + want: func() *bq.Job { + j := defaultExtractJob() + j.Configuration.Extract.Compression = "GZIP" + j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Extract.FieldDelimiter = "\t" + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { + t.Errorf("err calling extract: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go new file mode 100644 index 000000000..d48859bf0 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go @@ -0,0 +1,112 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import bq "google.golang.org/api/bigquery/v2" + +// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute +// an input or output to a BigQuery operation. +type GCSReference struct { + uris []string + + // FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data. + // The default is ",". + FieldDelimiter string + + // The number of rows at the top of a CSV file that BigQuery will skip when loading the data. + SkipLeadingRows int64 + + // SourceFormat is the format of the GCS data to be loaded into BigQuery. + // Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV. + SourceFormat DataFormat + // Only used when loading data. + Encoding Encoding + + // Quote is the value used to quote data sections in a CSV file. + // The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset. 
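+	// For example (illustrative): setting Quote to "'" causes 'a,b' to be
+	// loaded as the single field value a,b.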
+	// To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true.
+	// Only used when loading data.
+	Quote          string
+	ForceZeroQuote bool
+
+	// DestinationFormat is the format to use when writing exported files.
+	// Allowed values are: CSV, Avro, JSON. The default is CSV.
+	// CSV is not supported for tables with nested or repeated fields.
+	DestinationFormat DataFormat
+	// Only used when writing data. Default is None.
+	Compression Compression
+}
+
+func (gcs *GCSReference) implementsSource()      {}
+func (gcs *GCSReference) implementsDestination() {}
+
+// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
+// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
+// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
+// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
+// For more information about the treatment of wildcards and multiple URIs,
+// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
+func (c *Client) NewGCSReference(uri ...string) *GCSReference {
+	return &GCSReference{uris: uri}
+}
+
+type DataFormat string
+
+const (
+	CSV             DataFormat = "CSV"
+	Avro            DataFormat = "AVRO"
+	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
+	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
+)
+
+// Encoding specifies the character encoding of data to be loaded into BigQuery.
+// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
+// for more details about how this is used.
+type Encoding string
+
+const (
+	UTF_8      Encoding = "UTF-8"
+	ISO_8859_1 Encoding = "ISO-8859-1"
+)
+
+// Compression is the type of compression to apply when writing data to Google Cloud Storage.
+type Compression string
+
+const (
+	None Compression = "NONE"
+	Gzip Compression = "GZIP"
+)
+
+func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad, projectID string) {
+	conf.SourceUris = gcs.uris
+	conf.SkipLeadingRows = gcs.SkipLeadingRows
+	conf.SourceFormat = string(gcs.SourceFormat)
+	conf.Encoding = string(gcs.Encoding)
+	conf.FieldDelimiter = gcs.FieldDelimiter
+
+	if gcs.ForceZeroQuote {
+		quote := ""
+		conf.Quote = &quote
+	} else if gcs.Quote != "" {
+		conf.Quote = &gcs.Quote
+	}
+}
+
+func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract, projectID string) {
+	conf.DestinationUris = gcs.uris
+	conf.Compression = string(gcs.Compression)
+	conf.DestinationFormat = string(gcs.DestinationFormat)
+	conf.FieldDelimiter = gcs.FieldDelimiter
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go
new file mode 100644
index 000000000..ff8fdc027
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go
@@ -0,0 +1,168 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" +) + +// A pageFetcher returns a page of rows, starting from the row specified by token. +type pageFetcher interface { + fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) +} + +// Iterator provides access to the result of a BigQuery lookup. +// Next must be called before the first call to Get. +type Iterator struct { + c *Client + + err error // contains any error encountered during calls to Next. + + // Once Next has been called at least once, rs contains the current + // page of data and nextToken contains the token for fetching the next + // page (empty if there is no more data to be fetched). + rs [][]Value + nextToken string + + // The remaining fields contain enough information to fetch the current + // page of data, and determine which row of data from this page is the + // current row. + + pf pageFetcher + pageToken string + + // The offset from the start of the current page to the current row. + // For a new iterator, this is -1. + offset int64 +} + +func newIterator(c *Client, pf pageFetcher) *Iterator { + return &Iterator{ + c: c, + pf: pf, + offset: -1, + } +} + +// fetchPage loads the current page of data from the server. +// The contents of rs and nextToken are replaced with the loaded data. +// If there is an error while fetching, the error is stored in it.err and false is returned. +func (it *Iterator) fetchPage(ctx context.Context) bool { + var res *readDataResult + var err error + for { + res, err = it.pf.fetch(ctx, it.c, it.pageToken) + if err != errIncompleteJob { + break + } + } + + if err != nil { + it.err = err + return false + } + + it.rs = res.rows + it.nextToken = res.pageToken + return true +} + +// getEnoughData loads new data into rs until offset no longer points beyond the end of rs. +func (it *Iterator) getEnoughData(ctx context.Context) bool { + if len(it.rs) == 0 { + // Either we have not yet fetched any pages, or we are iterating over an empty dataset. + // In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken. + // In the latter case, it is harmless to fetch a page of data. + if !it.fetchPage(ctx) { + return false + } + } + + for it.offset >= int64(len(it.rs)) { + // If offset is still outside the bounds of the loaded data, + // but there are no more pages of data to fetch, then we have + // failed to satisfy the offset. + if it.nextToken == "" { + return false + } + + // offset cannot be satisfied with the currently loaded data, + // so we fetch the next page. We no longer need the existing + // cached rows, so we remove them and update the offset to be + // relative to the new page that we're about to fetch. + // NOTE: we can't just set offset to 0, because after + // marshalling/unmarshalling, it's possible for the offset to + // point arbitrarily far beyond the end of rs. + // This can happen if the server returns a different size + // results page before and after marshalling. + it.offset -= int64(len(it.rs)) + it.pageToken = it.nextToken + if !it.fetchPage(ctx) { + return false + } + } + return true +} + +// Next advances the Iterator to the next row, making that row available +// via the Get method. +// Next must be called before the first call to Get, and blocks until data is available. 
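+//
+// A typical read loop looks like this (a sketch; ValueList, defined
+// elsewhere in this package, implements ValueLoader):
+//
+//	for it.Next(ctx) {
+//		var vals ValueList
+//		if err := it.Get(&vals); err != nil {
+//			// handle error
+//		}
+//		// use vals
+//	}
+//	if err := it.Err(); err != nil {
+//		// handle error
+//	}
+//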
+// Next returns false when there are no more rows available, either because +// the end of the output was reached, or because there was an error (consult +// the Err method to determine which). +func (it *Iterator) Next(ctx context.Context) bool { + if it.err != nil { + return false + } + + // Advance offset to where we want it to be for the next call to Get. + it.offset++ + + // offset may now point beyond the end of rs, so we fetch data + // until offset is within its bounds again. If there are no more + // results available, offset will be left pointing beyond the bounds + // of rs. + // At the end of this method, rs will contain at least one element + // unless the dataset we are iterating over is empty. + return it.getEnoughData(ctx) +} + +// Err returns the last error encountered by Next, or nil for no error. +func (it *Iterator) Err() error { + return it.err +} + +// Get loads the current row into dst, which must implement ValueLoader. +func (it *Iterator) Get(dst interface{}) error { + if it.err != nil { + return fmt.Errorf("Get called on iterator in error state: %v", it.err) + } + + // If Next has been called, then offset should always index into a + // valid row in rs, as long as there is still data available. + if it.offset >= int64(len(it.rs)) || it.offset < 0 { + return errors.New("Get called without preceding successful call to Next") + } + + if dst, ok := dst.(ValueLoader); ok { + return dst.Load(it.rs[it.offset]) + } + return errors.New("Get called with unsupported argument type") +} + +// TODO(mcgreevy): Add a method to *Iterator that returns a schema which describes the data. diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go new file mode 100644 index 000000000..b1f23c28a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go @@ -0,0 +1,415 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type fetchResponse struct { + result *readDataResult // The result to return. + err error // The error to return. +} + +// pageFetcherStub services fetch requests by returning data from an in-memory list of values. +type pageFetcherStub struct { + fetchResponses map[string]fetchResponse + + err error +} + +func (pf *pageFetcherStub) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { + call, ok := pf.fetchResponses[token] + if !ok { + pf.err = fmt.Errorf("Unexpected page token: %q", token) + } + return call.result, call.err +} + +func TestIterator(t *testing.T) { + fetchFailure := errors.New("fetch failure") + + testCases := []struct { + desc string + alreadyConsumed int64 // amount to advance offset before commencing reading. 
+ fetchResponses map[string]fetchResponse + want []ValueList + wantErr error + }{ + { + desc: "Iteration over single empty page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{}, + }, + }, + }, + want: []ValueList{}, + }, + { + desc: "Iteration over single page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}}, + }, + { + desc: "Iteration over two pages", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, + }, + { + desc: "Server response includes empty page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "b", + rows: [][]Value{}, + }, + }, + "b": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, + }, + { + desc: "Fetch error", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + // We returns some data from this fetch, but also an error. + // So the end result should include only data from the previous fetch. + err: fetchFailure, + result: &readDataResult{ + pageToken: "b", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}}, + wantErr: fetchFailure, + }, + { + desc: "Skip over a single element", + alreadyConsumed: 1, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + want: []ValueList{{11, 12}, {101, 102}, {111, 112}}, + }, + { + desc: "Skip over an entire page", + alreadyConsumed: 2, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + want: []ValueList{{101, 102}, {111, 112}}, + }, + { + desc: "Skip beyond start of second page", + alreadyConsumed: 3, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + want: []ValueList{{111, 112}}, + }, + { + desc: "Skip beyond all data", + alreadyConsumed: 4, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + // In this test case, Next will return false on its first call, + // so we won't even attempt to call Get. 
+ want: []ValueList{}, + }, + } + + for _, tc := range testCases { + pf := &pageFetcherStub{ + fetchResponses: tc.fetchResponses, + } + it := newIterator(nil, pf) + it.offset += tc.alreadyConsumed + + values, err := consumeIterator(it) + if err != nil { + t.Fatalf("%s: %v", tc.desc, err) + } + + if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { + t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want) + } + if it.Err() != tc.wantErr { + t.Errorf("%s: iterator.Err:\ngot: %v\nwant: %v", tc.desc, it.Err(), tc.wantErr) + } + } +} + +// consumeIterator reads all values from an iterator and returns them. +func consumeIterator(it *Iterator) ([]ValueList, error) { + var got []ValueList + for it.Next(context.Background()) { + var vals ValueList + if err := it.Get(&vals); err != nil { + return nil, fmt.Errorf("err calling Get: %v", err) + } else { + got = append(got, vals) + } + } + + return got, nil +} + +func TestGetBeforeNext(t *testing.T) { + // TODO: once mashalling/unmarshalling of iterators is implemented, do a similar test for unmarshalled iterators. + pf := &pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + } + it := newIterator(nil, pf) + var vals ValueList + if err := it.Get(&vals); err == nil { + t.Errorf("Expected error calling Get before Next") + } +} + +type delayedPageFetcher struct { + pageFetcherStub + delayCount int +} + +func (pf *delayedPageFetcher) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { + if pf.delayCount > 0 { + pf.delayCount-- + return nil, errIncompleteJob + } + return pf.pageFetcherStub.fetch(ctx, c, token) +} + +func TestIterateIncompleteJob(t *testing.T) { + want := []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}} + pf := pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + } + dpf := &delayedPageFetcher{ + pageFetcherStub: pf, + delayCount: 1, + } + it := newIterator(nil, dpf) + + values, err := consumeIterator(it) + if err != nil { + t.Fatal(err) + } + + if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) { + t.Errorf("values: got:\n%v\nwant:\n%v", values, want) + } + if it.Err() != nil { + t.Fatalf("iterator.Err: got:\n%v", it.Err()) + } + if dpf.delayCount != 0 { + t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount) + } +} + +func TestGetDuringErrorState(t *testing.T) { + pf := &pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": {err: errors.New("bang")}, + }, + } + it := newIterator(nil, pf) + var vals ValueList + it.Next(context.Background()) + if it.Err() == nil { + t.Errorf("Expected error after calling Next") + } + if err := it.Get(&vals); err == nil { + t.Errorf("Expected error calling Get when iterator has a non-nil error.") + } +} + +func TestGetAfterFinished(t *testing.T) { + testCases := []struct { + alreadyConsumed int64 // amount to advance offset before commencing reading. 
+ fetchResponses map[string]fetchResponse + want []ValueList + }{ + { + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}}, + }, + { + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{}, + }, + }, + }, + want: []ValueList{}, + }, + { + alreadyConsumed: 100, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + want: []ValueList{}, + }, + } + + for _, tc := range testCases { + pf := &pageFetcherStub{ + fetchResponses: tc.fetchResponses, + } + it := newIterator(nil, pf) + it.offset += tc.alreadyConsumed + + values, err := consumeIterator(it) + if err != nil { + t.Fatal(err) + } + + if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { + t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want) + } + if it.Err() != nil { + t.Fatalf("iterator.Err: got:\n%v\nwant:\n:nil", it.Err()) + } + // Try calling Get again. + var vals ValueList + if err := it.Get(&vals); err == nil { + t.Errorf("Expected error calling Get when there are no more values") + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go new file mode 100644 index 000000000..832f0e9cb --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go @@ -0,0 +1,124 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// A Job represents an operation which has been submitted to BigQuery for processing. +type Job struct { + service service + projectID string + jobID string + + isQuery bool +} + +// JobFromID creates a Job which refers to an existing BigQuery job. The job +// need not have been created by this package. For example, the job may have +// been created in the BigQuery console. +func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { + jobType, err := c.service.getJobType(ctx, c.projectID, id) + if err != nil { + return nil, err + } + + return &Job{ + service: c.service, + projectID: c.projectID, + jobID: id, + isQuery: jobType == queryJobType, + }, nil +} + +func (j *Job) ID() string { + return j.jobID +} + +// State is one of a sequence of states that a Job progresses through as it is processed. +type State int + +const ( + Pending State = iota + Running + Done +) + +// JobStatus contains the current State of a job, and errors encountered while processing that job. +type JobStatus struct { + State State + + err error + + // All errors encountered during the running of the job. + // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. 
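+	//
+	// A caller typically reaches this information by polling (a sketch;
+	// job is the *Job returned by Copy or JobFromID):
+	//
+	//	status, err := job.Status(ctx)
+	//	// handle err
+	//	if status.Done() {
+	//		if err := status.Err(); err != nil {
+	//			// the job failed; status.Errors holds the details
+	//		}
+	//	}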
+	Errors []*Error
+}
+
+// jobOption is an Option which modifies a bq.Job proto.
+// This is used for configuring values that apply to all operations, such as setting a jobReference.
+type jobOption interface {
+	customizeJob(job *bq.Job, projectID string)
+}
+
+type jobID string
+
+// JobID returns an Option that sets the job ID of a BigQuery job.
+// If this Option is not used, a job ID is generated automatically.
+func JobID(ID string) Option {
+	return jobID(ID)
+}
+
+func (opt jobID) implementsOption() {}
+
+func (opt jobID) customizeJob(job *bq.Job, projectID string) {
+	job.JobReference = &bq.JobReference{
+		JobId:     string(opt),
+		ProjectId: projectID,
+	}
+}
+
+// Done reports whether the job has completed.
+// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
+func (s *JobStatus) Done() bool {
+	return s.State == Done
+}
+
+// Err returns the error that caused the job to complete unsuccessfully (if any).
+func (s *JobStatus) Err() error {
+	return s.err
+}
+
+// Status returns the current status of the job. It fails if the Status could not be determined.
+func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
+	return j.service.jobStatus(ctx, j.projectID, j.jobID)
+}
+
+func (j *Job) implementsReadSource() {}
+
+func (j *Job) customizeReadQuery(cursor *readQueryConf) error {
+	// There are multiple kinds of jobs, but only a query job is suitable for reading.
+	if !j.isQuery {
+		return errors.New("Cannot read from a non-query job")
+	}
+
+	cursor.projectID = j.projectID
+	cursor.jobID = j.jobID
+	return nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go
new file mode 100644
index 000000000..46ec88cdc
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go
@@ -0,0 +1,112 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"fmt"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+type loadOption interface {
+	customizeLoad(conf *bq.JobConfigurationLoad, projectID string)
+}
+
+// DestinationSchema returns an Option that specifies the schema to use when loading data into a new table.
+// A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table.
+// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup.
+// schema must not be nil.
+func DestinationSchema(schema Schema) Option { return destSchema{Schema: schema} }
+
+type destSchema struct {
+	Schema
+}
+
+func (opt destSchema) implementsOption() {}
+
+func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
+	conf.Schema = opt.asTableSchema()
+}
+
+// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored.
+// If this maximum is exceeded, the operation will be unsuccessful.
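+//
+// An illustrative load call (a sketch; table and gcs are placeholder
+// *Table and *GCSReference values):
+//
+//	job, err := client.Copy(ctx, table, gcs, MaxBadRecords(10))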
+func MaxBadRecords(n int64) Option { return maxBadRecords(n) } + +type maxBadRecords int64 + +func (opt maxBadRecords) implementsOption() {} + +func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { + conf.MaxBadRecords = int64(opt) +} + +// AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls. +func AllowJaggedRows() Option { return allowJaggedRows{} } + +type allowJaggedRows struct{} + +func (opt allowJaggedRows) implementsOption() {} + +func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { + conf.AllowJaggedRows = true +} + +// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data. +func AllowQuotedNewlines() Option { return allowQuotedNewlines{} } + +type allowQuotedNewlines struct{} + +func (opt allowQuotedNewlines) implementsOption() {} + +func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { + conf.AllowQuotedNewlines = true +} + +// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated. +// Unknown values are ignored. For CSV this ignores extra values at the end of a line. +// For JSON this ignores named values that do not match any column name. +// If this Option is not used, records containing unknown values are treated as bad records. +// The MaxBadRecords Option can be used to customize how bad records are handled. +func IgnoreUnknownValues() Option { return ignoreUnknownValues{} } + +type ignoreUnknownValues struct{} + +func (opt ignoreUnknownValues) implementsOption() {} + +func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { + conf.IgnoreUnknownValues = true +} + +func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationLoad{} + + dst.customizeLoadDst(payload, c.projectID) + src.customizeLoadSrc(payload, c.projectID) + + for _, opt := range options { + o, ok := opt.(loadOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeLoad(payload, c.projectID) + } + + job.Configuration = &bq.JobConfiguration{ + Load: payload, + } + return c.service.insertJob(ctx, job, c.projectID) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go new file mode 100644 index 000000000..df6cec0cf --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go @@ -0,0 +1,198 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultLoadJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Load: &bq.JobConfigurationLoad{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + SourceUris: []string{"uri"}, + }, + }, + } +} + +func stringFieldSchema() *FieldSchema { + return &FieldSchema{Name: "fieldname", Type: StringFieldType} +} + +func nestedFieldSchema() *FieldSchema { + return &FieldSchema{ + Name: "nested", + Type: RecordFieldType, + Schema: Schema{stringFieldSchema()}, + } +} + +func bqStringFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "fieldname", + Type: "STRING", + } +} + +func bqNestedFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "nested", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{bqStringFieldSchema()}, + } +} + +func TestLoad(t *testing.T) { + testCases := []struct { + dst *Table + src *GCSReference + options []Option + want *bq.Job + }{ + { + dst: defaultTable, + src: defaultGCS, + want: defaultLoadJob(), + }, + { + dst: defaultTable, + src: defaultGCS, + options: []Option{ + MaxBadRecords(1), + AllowJaggedRows(), + AllowQuotedNewlines(), + IgnoreUnknownValues(), + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.MaxBadRecords = 1 + j.Configuration.Load.AllowJaggedRows = true + j.Configuration.Load.AllowQuotedNewlines = true + j.Configuration.Load.IgnoreUnknownValues = true + return j + }(), + }, + { + dst: &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + }, + options: []Option{CreateNever, WriteTruncate}, + src: defaultGCS, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.CreateDisposition = "CREATE_NEVER" + j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" + return j + }(), + }, + { + dst: &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + }, + src: defaultGCS, + options: []Option{ + DestinationSchema(Schema{ + stringFieldSchema(), + nestedFieldSchema(), + }), + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.Schema = &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }} + return j + }(), + }, + { + dst: defaultTable, + src: &GCSReference{ + uris: []string{"uri"}, + SkipLeadingRows: 1, + SourceFormat: JSON, + Encoding: UTF_8, + FieldDelimiter: "\t", + Quote: "-", + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SkipLeadingRows = 1 + j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + { + dst: defaultTable, + src: &GCSReference{ + uris: []string{"uri"}, + Quote: "", + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.Quote = nil + return j + }(), + }, + { + dst: defaultTable, + src: &GCSReference{ + uris: []string{"uri"}, + Quote: "", + ForceZeroQuote: true, + }, + want: func() *bq.Job { + j := defaultLoadJob() + empty := "" + j.Configuration.Load.Quote = &empty + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { + 
t.Errorf("err calling load: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("loading: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go new file mode 100644 index 000000000..99807a73e --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go @@ -0,0 +1,42 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import bq "google.golang.org/api/bigquery/v2" + +// Query represents a query to be executed. +type Query struct { + // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. + Q string + + // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. + // If DefaultProjectID is set, DefaultDatasetID must also be set. + DefaultProjectID string + DefaultDatasetID string +} + +func (q *Query) implementsSource() {} + +func (q *Query) implementsReadSource() {} + +func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery, projectID string) { + conf.Query = q.Q + if q.DefaultProjectID != "" || q.DefaultDatasetID != "" { + conf.DefaultDataset = &bq.DatasetReference{ + DatasetId: q.DefaultDatasetID, + ProjectId: q.DefaultProjectID, + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go new file mode 100644 index 000000000..af62b5b3b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go @@ -0,0 +1,89 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type queryOption interface { + customizeQuery(conf *bq.JobConfigurationQuery, projectID string) +} + +// DisableQueryCache returns an Option that prevents results being fetched from the query cache. +// If this Option is not used, results are fetched from the cache if they are available. +// The query cache is a best-effort cache that is flushed whenever tables in the query are modified. +// Cached results are only available when TableID is unspecified in the query's destination Table. 
+// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching +func DisableQueryCache() Option { return disableQueryCache{} } + +type disableQueryCache struct{} + +func (opt disableQueryCache) implementsOption() {} + +func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { + f := false + conf.UseQueryCache = &f +} + +// JobPriority returns an Option that causes a query to be scheduled with the specified priority. +// The default priority is InteractivePriority. +// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries +func JobPriority(priority string) Option { return jobPriority(priority) } + +type jobPriority string + +func (opt jobPriority) implementsOption() {} + +func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { + conf.Priority = string(opt) +} + +const ( + BatchPriority = "BATCH" + InteractivePriority = "INTERACTIVE" +) + +// TODO(mcgreevy): support large results. +// TODO(mcgreevy): support non-flattened results. + +func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationQuery{} + + dst.customizeQueryDst(payload, c.projectID) + src.customizeQuerySrc(payload, c.projectID) + + for _, opt := range options { + o, ok := opt.(queryOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeQuery(payload, c.projectID) + } + + job.Configuration = &bq.JobConfiguration{ + Query: payload, + } + j, err := c.service.insertJob(ctx, job, c.projectID) + if err != nil { + return nil, err + } + j.isQuery = true + return j, nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go new file mode 100644 index 000000000..119d01b2b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go @@ -0,0 +1,118 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultQueryJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + Query: "query string", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + }, + }, + } +} + +func TestQuery(t *testing.T) { + testCases := []struct { + dst *Table + src *Query + options []Option + want *bq.Job + }{ + { + dst: defaultTable, + src: defaultQuery, + want: defaultQueryJob(), + }, + { + dst: defaultTable, + src: &Query{ + Q: "query string", + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DefaultDataset = nil + return j + }(), + }, + { + dst: &Table{}, + src: defaultQuery, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DestinationTable = nil + return j + }(), + }, + { + dst: &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + }, + src: defaultQuery, + options: []Option{CreateNever, WriteTruncate}, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" + j.Configuration.Query.CreateDisposition = "CREATE_NEVER" + return j + }(), + }, + { + dst: defaultTable, + src: defaultQuery, + options: []Option{DisableQueryCache()}, + want: func() *bq.Job { + j := defaultQueryJob() + f := false + j.Configuration.Query.UseQueryCache = &f + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { + t.Errorf("err calling query: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go new file mode 100644 index 000000000..0159b12a2 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import "golang.org/x/net/context" + +// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery. +func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) } + +type recordsPerRequest int64 + +func (opt recordsPerRequest) customizeRead(conf *pagingConf) { + conf.recordsPerRequest = int64(opt) + conf.setRecordsPerRequest = true +} + +// StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from. 
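+//
+// An illustrative use (hypothetical names; assumes a configured *Client named c
+// and a ReadSource src):
+//
+//	it, err := c.Read(ctx, src, StartIndex(100), RecordsPerRequest(1000))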
+func StartIndex(i uint64) ReadOption { return startIndex(i) } + +type startIndex uint64 + +func (opt startIndex) customizeRead(conf *pagingConf) { + conf.startIndex = uint64(opt) +} + +func (conf *readTableConf) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { + return c.service.readTabledata(ctx, conf, token) +} + +func (c *Client) readTable(t *Table, options []ReadOption) (*Iterator, error) { + conf := &readTableConf{} + t.customizeReadSrc(conf) + + for _, o := range options { + o.customizeRead(&conf.paging) + } + + return newIterator(c, conf), nil +} + +func (conf *readQueryConf) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { + return c.service.readQuery(ctx, conf, token) +} + +func (c *Client) readQueryResults(job *Job, options []ReadOption) (*Iterator, error) { + conf := &readQueryConf{} + if err := job.customizeReadQuery(conf); err != nil { + return nil, err + } + + for _, o := range options { + o.customizeRead(&conf.paging) + } + + return newIterator(c, conf), nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go new file mode 100644 index 000000000..a03bd3f45 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go @@ -0,0 +1,308 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type readTabledataArgs struct { + conf *readTableConf + tok string +} + +type readQueryArgs struct { + conf *readQueryConf + tok string +} + +// readServiceStub services read requests by returning data from an in-memory list of values. +type readServiceStub struct { + // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. + values [][][]Value // contains pages / rows / columns. + pageTokens map[string]string // maps incoming page token to returned page token. + + // arguments are recorded for later inspection. 
+	readTabledataCalls []readTabledataArgs
+	readQueryCalls     []readQueryArgs
+
+	service
+}
+
+func (s *readServiceStub) readValues(tok string) *readDataResult {
+	result := &readDataResult{
+		pageToken: s.pageTokens[tok],
+		rows:      s.values[0],
+	}
+	s.values = s.values[1:]
+
+	return result
+}
+
+func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
+	s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
+	return s.readValues(token), nil
+}
+
+func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
+	s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
+	return s.readValues(token), nil
+}
+
+func TestRead(t *testing.T) {
+	// The data for the service stub to return is populated for each test case in the testCases for loop.
+	service := &readServiceStub{}
+	c := &Client{
+		service: service,
+	}
+
+	queryJob := &Job{
+		projectID: "project-id",
+		jobID:     "job-id",
+		service:   service,
+		isQuery:   true,
+	}
+
+	for _, src := range []ReadSource{defaultTable, queryJob} {
+		testCases := []struct {
+			data       [][][]Value
+			pageTokens map[string]string
+			want       []ValueList
+		}{
+			{
+				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
+				pageTokens: map[string]string{"": "a", "a": ""},
+				want:       []ValueList{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
+			},
+			{
+				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
+				pageTokens: map[string]string{"": ""}, // no more pages after first one.
+				want:       []ValueList{{1, 2}, {11, 12}},
+			},
+		}
+
+		for _, tc := range testCases {
+			service.values = tc.data
+			service.pageTokens = tc.pageTokens
+			if got, ok := doRead(t, c, src); ok {
+				if !reflect.DeepEqual(got, tc.want) {
+					t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
+				}
+			}
+		}
+	}
+}
+
+// doRead calls Read with a ReadSource. Get is repeatedly called on the Iterator returned by Read and the results are returned.
+func doRead(t *testing.T, c *Client, src ReadSource) ([]ValueList, bool) {
+	it, err := c.Read(context.Background(), src)
+	if err != nil {
+		t.Errorf("err calling Read: %v", err)
+		return nil, false
+	}
+	var got []ValueList
+	for it.Next(context.Background()) {
+		var vals ValueList
+		if err := it.Get(&vals); err != nil {
+			t.Errorf("err calling Get: %v", err)
+			return nil, false
+		}
+		got = append(got, vals)
+	}
+
+	return got, true
+}
+
+func TestNoMoreValues(t *testing.T) {
+	c := &Client{
+		service: &readServiceStub{
+			values: [][][]Value{{{1, 2}, {11, 12}}},
+		},
+	}
+	it, err := c.Read(context.Background(), defaultTable)
+	if err != nil {
+		t.Fatalf("err calling Read: %v", err)
+	}
+	var vals ValueList
+	// We expect to retrieve two values and then fail on the next attempt.
+	if !it.Next(context.Background()) {
+		t.Fatalf("Next: got: false: want: true")
+	}
+	if !it.Next(context.Background()) {
+		t.Fatalf("Next: got: false: want: true")
+	}
+	if err := it.Get(&vals); err != nil {
+		t.Fatalf("Get: got: %v: want: nil", err)
+	}
+	if it.Next(context.Background()) {
+		t.Fatalf("Next: got: true: want: false")
+	}
+	if err := it.Get(&vals); err == nil {
+		t.Fatalf("Get: got: %v: want: non-nil", err)
+	}
+}
+
+// delayedReadStub simulates reading results from a query that has not yet
+// completed. Its readQuery method initially reports that the query job is not
+// yet complete. Subsequently, it proxies the request through to another
+// service stub.
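+// It is used by TestIncompleteJob below to verify that an Iterator blocks until the job is complete.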
+type delayedReadStub struct {
+	numDelays int
+
+	readServiceStub
+}
+
+func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
+	if s.numDelays > 0 {
+		s.numDelays--
+		return nil, errIncompleteJob
+	}
+	return s.readServiceStub.readQuery(ctx, conf, token)
+}
+
+// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
+func TestIncompleteJob(t *testing.T) {
+	service := &delayedReadStub{
+		numDelays: 2,
+		readServiceStub: readServiceStub{
+			values: [][][]Value{{{1, 2}}},
+		},
+	}
+	c := &Client{service: service}
+	queryJob := &Job{
+		projectID: "project-id",
+		jobID:     "job-id",
+		service:   service,
+		isQuery:   true,
+	}
+	it, err := c.Read(context.Background(), queryJob)
+	if err != nil {
+		t.Fatalf("err calling Read: %v", err)
+	}
+	var got ValueList
+	want := ValueList{1, 2}
+	if !it.Next(context.Background()) {
+		t.Fatalf("Next: got: false: want: true")
+	}
+	if err := it.Get(&got); err != nil {
+		t.Fatalf("Error calling Get: %v", err)
+	}
+	if service.numDelays != 0 {
+		t.Errorf("remaining numDelays: got: %v: want: 0", service.numDelays)
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
+	}
+}
+
+type errorReadService struct {
+	service
+}
+
+func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
+	return nil, errors.New("bang!")
+}
+
+func TestReadError(t *testing.T) {
+	// test that service read errors are propagated back to the caller.
+	c := &Client{service: &errorReadService{}}
+	it, err := c.Read(context.Background(), defaultTable)
+	if err != nil {
+		// Read should not return an error; only Err should.
+		t.Fatalf("err calling Read: %v", err)
+	}
+	if it.Next(context.Background()) {
+		t.Fatalf("Next: got: true: want: false")
+	}
+	if err := it.Err(); err.Error() != "bang!" {
+		t.Fatalf("Err: got: %v: want: bang!", err)
+	}
+}
+
+func TestReadTabledataOptions(t *testing.T) {
+	// test that read options are propagated.
+	s := &readServiceStub{
+		values: [][][]Value{{{1, 2}}},
+	}
+	c := &Client{service: s}
+	it, err := c.Read(context.Background(), defaultTable, RecordsPerRequest(5))
+
+	if err != nil {
+		t.Fatalf("err calling Read: %v", err)
+	}
+	if !it.Next(context.Background()) {
+		t.Fatalf("Next: got: false: want: true")
+	}
+
+	want := []readTabledataArgs{{
+		conf: &readTableConf{
+			projectID: "project-id",
+			datasetID: "dataset-id",
+			tableID:   "table-id",
+			paging: pagingConf{
+				recordsPerRequest:    5,
+				setRecordsPerRequest: true,
+			},
+		},
+		tok: "",
+	}}
+
+	if !reflect.DeepEqual(s.readTabledataCalls, want) {
+		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
+	}
+}
+
+func TestReadQueryOptions(t *testing.T) {
+	// test that read options are propagated.
+ s := &readServiceStub{ + values: [][][]Value{{{1, 2}}}, + } + c := &Client{service: s} + + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + service: s, + isQuery: true, + } + it, err := c.Read(context.Background(), queryJob, RecordsPerRequest(5)) + + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + if !it.Next(context.Background()) { + t.Fatalf("Next: got: false: want: true") + } + + want := []readQueryArgs{{ + conf: &readQueryConf{ + projectID: "project-id", + jobID: "job-id", + paging: pagingConf{ + recordsPerRequest: 5, + setRecordsPerRequest: true, + }, + }, + tok: "", + }} + + if !reflect.DeepEqual(s.readQueryCalls, want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go new file mode 100644 index 000000000..abc159a53 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go @@ -0,0 +1,106 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import bq "google.golang.org/api/bigquery/v2" + +// Schema describes the fields in a table or query result. +type Schema []*FieldSchema + +// TODO(mcgreevy): add a function to generate a schema from a struct. + +type FieldSchema struct { + // The field name. + // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), + // and must start with a letter or underscore. + // The maximum length is 128 characters. + Name string + + // A description of the field. The maximum length is 16,384 characters. + Description string + + // Whether the field may contain multiple values. + Repeated bool + // Whether the field is required. Ignored if Repeated is true. + Required bool + + // The field data type. If Type is Record, then this field contains a nested schema, + // which is described by Schema. + Type FieldType + // Describes the nested schema if Type is set to Record. + Schema Schema +} + +func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema { + tfs := &bq.TableFieldSchema{ + Description: fs.Description, + Name: fs.Name, + Type: string(fs.Type), + } + + if fs.Repeated { + tfs.Mode = "REPEATED" + } else if fs.Required { + tfs.Mode = "REQUIRED" + } // else leave as default, which is interpreted as NULLABLE. 
+ + for _, f := range fs.Schema { + tfs.Fields = append(tfs.Fields, f.asTableFieldSchema()) + } + + return tfs +} + +func (s Schema) asTableSchema() *bq.TableSchema { + var fields []*bq.TableFieldSchema + for _, f := range s { + fields = append(fields, f.asTableFieldSchema()) + } + return &bq.TableSchema{Fields: fields} +} + +func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { + fs := &FieldSchema{ + Description: tfs.Description, + Name: tfs.Name, + Repeated: tfs.Mode == "REPEATED", + Required: tfs.Mode == "REQUIRED", + Type: FieldType(tfs.Type), + } + + for _, f := range tfs.Fields { + fs.Schema = append(fs.Schema, convertTableFieldSchema(f)) + } + return fs +} + +func convertTableSchema(ts *bq.TableSchema) Schema { + var s Schema + for _, f := range ts.Fields { + s = append(s, convertTableFieldSchema(f)) + } + return s +} + +type FieldType string + +const ( + StringFieldType FieldType = "STRING" + IntegerFieldType FieldType = "INTEGER" + FloatFieldType FieldType = "FLOAT" + BooleanFieldType FieldType = "BOOLEAN" + TimestampFieldType FieldType = "TIMESTAMP" + RecordFieldType FieldType = "RECORD" +) diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go new file mode 100644 index 000000000..4ff147fe0 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go @@ -0,0 +1,168 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + bq "google.golang.org/api/bigquery/v2" +) + +func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Description: desc, + Name: name, + Mode: mode, + Type: typ, + } +} + +func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { + return &FieldSchema{ + Description: desc, + Name: name, + Repeated: repeated, + Required: required, + Type: FieldType(typ), + } +} + +func TestSchemaConversion(t *testing.T) { + testCases := []struct { + schema Schema + bqSchema *bq.TableSchema + }{ + { + // required + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, true), + }, + }, + { + // repeated + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REPEATED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", true, false), + }, + }, + { + // nullable, string + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, false), + }, + }, + { + // integer + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "INTEGER", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "INTEGER", false, false), + }, + }, + { + // float + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "FLOAT", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "FLOAT", false, false), + }, + }, + { + // boolean + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "BOOLEAN", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "BOOLEAN", false, false), + }, + }, + { + // timestamp + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "TIMESTAMP", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "TIMESTAMP", false, false), + }, + }, + { + // nested + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + &bq.TableFieldSchema{ + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Mode: "REQUIRED", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("inner field", "inner", "STRING", ""), + }, + }, + }, + }, + schema: Schema{ + &FieldSchema{ + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Required: true, + Type: "RECORD", + Schema: []*FieldSchema{ + &FieldSchema{ + Description: "inner field", + Name: "inner", + Type: "STRING", + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + bqSchema := tc.schema.asTableSchema() + if !reflect.DeepEqual(bqSchema, tc.bqSchema) { + t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", bqSchema, tc.bqSchema) + } + schema := convertTableSchema(tc.bqSchema) + if !reflect.DeepEqual(schema, tc.schema) { + t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go new file mode 100644 index 000000000..1e5bd1a0c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go @@ -0,0 +1,268 @@ +// Copyright 2015 Google 
Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// service provides an internal abstraction to isolate the generated
+// BigQuery API; most of this package uses this interface instead.
+// The single implementation, *bigqueryService, contains all the knowledge
+// of the generated BigQuery API.
+type service interface {
+	insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error)
+	getJobType(ctx context.Context, projectID, jobID string) (jobType, error)
+	jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error)
+	listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error)
+
+	// readQuery reads data resulting from a query job. If the job is not
+	// yet complete, an errIncompleteJob is returned. readQuery may be
+	// called repeatedly to wait for results indefinitely.
+	readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)
+
+	readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
+}
+
+type bigqueryService struct {
+	s *bq.Service
+}
+
+func newBigqueryService(client *http.Client) (*bigqueryService, error) {
+	s, err := bq.New(client)
+	if err != nil {
+		return nil, fmt.Errorf("constructing bigquery client: %v", err)
+	}
+
+	return &bigqueryService{s: s}, nil
+}
+
+// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
+// token is the token of the initial page to start from. Use an empty string to start from the beginning.
+func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
+	for {
+		var err error
+		token, err = getPage(token)
+		if err != nil {
+			return err
+		}
+		if token == "" {
+			return nil
+		}
+	}
+}
+
+func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
+	// TODO(mcgreevy): use ctx
+	res, err := s.s.Jobs.Insert(projectID, job).Do()
+	if err != nil {
+		return nil, err
+	}
+	return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
+}
+
+type pagingConf struct {
+	recordsPerRequest    int64
+	setRecordsPerRequest bool
+
+	startIndex uint64
+}
+
+type readTableConf struct {
+	projectID, datasetID, tableID string
+	paging                        pagingConf
+}
+
+type readDataResult struct {
+	pageToken string
+	rows      [][]Value
+	totalRows uint64
+	schema    Schema
+}
+
+type readQueryConf struct {
+	projectID, jobID string
+	paging           pagingConf
+}
+
+func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
+	req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID).
+		PageToken(pageToken).
+		StartIndex(conf.paging.startIndex)
+
+	if conf.paging.setRecordsPerRequest {
+		req = req.MaxResults(conf.paging.recordsPerRequest)
+	}
+
+	res, err := req.Do()
+	if err != nil {
+		return nil, err
+	}
+
+	result := &readDataResult{
+		pageToken: res.PageToken,
+		rows:      convertRows(res.Rows),
+		totalRows: uint64(res.TotalRows),
+	}
+	return result, nil
+}
+
+var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")
+
+// getQueryResultsTimeout controls the maximum duration of a request to the
+// BigQuery GetQueryResults endpoint. Setting a long timeout here does not
+// cause increased overall latency, as results are returned as soon as they are
+// available.
+const getQueryResultsTimeout = time.Minute
+
+func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
+	req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
+		PageToken(pageToken).
+		StartIndex(conf.paging.startIndex).
+		TimeoutMs(int64(getQueryResultsTimeout / time.Millisecond)) // TimeoutMs expects milliseconds.
+
+	if conf.paging.setRecordsPerRequest {
+		req = req.MaxResults(conf.paging.recordsPerRequest)
+	}
+
+	res, err := req.Do()
+	if err != nil {
+		return nil, err
+	}
+
+	if !res.JobComplete {
+		return nil, errIncompleteJob
+	}
+
+	result := &readDataResult{
+		pageToken: res.PageToken,
+		rows:      convertRows(res.Rows),
+		totalRows: res.TotalRows,
+		schema:    convertTableSchema(res.Schema),
+	}
+	return result, nil
+}
+
+func convertRows(rows []*bq.TableRow) [][]Value {
+	convertRow := func(r *bq.TableRow) []Value {
+		var values []Value
+		for _, cell := range r.F {
+			values = append(values, cell.V)
+		}
+		return values
+	}
+
+	var rs [][]Value
+	for _, r := range rows {
+		rs = append(rs, convertRow(r))
+	}
+	return rs
+}
+
+type jobType int
+
+const (
+	copyJobType jobType = iota
+	extractJobType
+	loadJobType
+	queryJobType
+)
+
+func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
+	// TODO(mcgreevy): use ctx
+	res, err := s.s.Jobs.Get(projectID, jobID).
+		Fields("configuration").
+		Do()
+
+	if err != nil {
+		return 0, err
+	}
+
+	switch {
+	case res.Configuration.Copy != nil:
+		return copyJobType, nil
+	case res.Configuration.Extract != nil:
+		return extractJobType, nil
+	case res.Configuration.Load != nil:
+		return loadJobType, nil
+	case res.Configuration.Query != nil:
+		return queryJobType, nil
+	default:
+		return 0, errors.New("unknown job type")
+	}
+}
+
+func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
+	// TODO(mcgreevy): use ctx
+	res, err := s.s.Jobs.Get(projectID, jobID).
+		Fields("status"). // Only fetch what we need.
+		Do()
+	if err != nil {
+		return nil, err
+	}
+	return jobStatusFromProto(res.Status)
+}
+
+var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
+
+func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
+	state, ok := stateMap[status.State]
+	if !ok {
+		return nil, fmt.Errorf("unexpected job state: %v", status.State)
+	}
+
+	newStatus := &JobStatus{
+		State: state,
+		err:   nil,
+	}
+	if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
+		newStatus.err = err
+	}
+
+	for _, ep := range status.Errors {
+		newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
+	}
+	return newStatus, nil
+}
+
+// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
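+// An empty returned token indicates that there are no further pages (see getPages above).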
+func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
+	// TODO(mcgreevy): use ctx
+	var tables []*Table
+	res, err := s.s.Tables.List(projectID, datasetID).
+		PageToken(pageToken).
+		Do()
+	if err != nil {
+		return nil, "", err
+	}
+	for _, t := range res.Tables {
+		tables = append(tables, convertTable(t))
+	}
+	return tables, res.NextPageToken, nil
+}
+
+func convertTable(t *bq.TableListTables) *Table {
+	return &Table{
+		ProjectID: t.TableReference.ProjectId,
+		DatasetID: t.TableReference.DatasetId,
+		TableID:   t.TableReference.TableId,
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go
new file mode 100644
index 000000000..9cbd7bf69
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go
@@ -0,0 +1,157 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"fmt"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// A Table is a reference to a BigQuery table.
+type Table struct {
+	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
+	// In this case the result will be stored in an ephemeral table.
+	ProjectID string
+	DatasetID string
+	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
+	// The maximum length is 1,024 characters.
+	TableID string
+}
+
+// Tables is a group of tables. The tables may belong to differing projects or datasets.
+type Tables []*Table
+
+// A TableCreateDisposition specifies the circumstances under which the destination table will be created.
+// The default is CreateIfNeeded.
+type TableCreateDisposition string
+
+const (
+	// The table will be created if it does not already exist. Tables are created atomically on successful completion of a job.
+	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
+
+	// The table must already exist and will not be automatically created.
+	CreateNever TableCreateDisposition = "CREATE_NEVER"
+)
+
+func CreateDisposition(disp TableCreateDisposition) Option { return disp }
+
+func (opt TableCreateDisposition) implementsOption() {}
+
+func (opt TableCreateDisposition) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
+	conf.CreateDisposition = string(opt)
+}
+
+func (opt TableCreateDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) {
+	conf.CreateDisposition = string(opt)
+}
+
+func (opt TableCreateDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
+	conf.CreateDisposition = string(opt)
+}
+
+// A TableWriteDisposition specifies how existing data in a destination table is treated.
+// The default is WriteAppend.
+type TableWriteDisposition string
+
+const (
+	// Data will be appended to any existing data in the destination table.
+	// Data is appended atomically on successful completion of a job.
+ WriteAppend TableWriteDisposition = "WRITE_APPEND" + + // Existing data in the destination table will be overwritten. + // Data is overwritten atomically on successful completion of a job. + WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE" + + // Writes will fail if the destination table already contains data. + WriteEmpty TableWriteDisposition = "WRITE_EMPTY" +) + +func WriteDisposition(disp TableWriteDisposition) Option { return disp } + +func (opt TableWriteDisposition) implementsOption() {} + +func (opt TableWriteDisposition) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { + conf.WriteDisposition = string(opt) +} + +func (opt TableWriteDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) { + conf.WriteDisposition = string(opt) +} + +func (opt TableWriteDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { + conf.WriteDisposition = string(opt) +} + +// TableType is the type of table. +type TableType string + +const ( + RegularTable TableType = "TABLE" + ViewTable TableType = "VIEW" +) + +func (t *Table) implementsSource() {} +func (t *Table) implementsReadSource() {} +func (t *Table) implementsDestination() {} +func (ts Tables) implementsSource() {} + +func (t *Table) tableRefProto() *bq.TableReference { + return &bq.TableReference{ + ProjectId: t.ProjectID, + DatasetId: t.DatasetID, + TableId: t.TableID, + } +} + +// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. +func (t *Table) FullyQualifiedName() string { + return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) +} + +// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. +func (t *Table) implicitTable() bool { + return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" +} + +func (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad, projectID string) { + conf.DestinationTable = t.tableRefProto() +} + +func (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract, projectID string) { + conf.SourceTable = t.tableRefProto() +} + +func (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy, projectID string) { + conf.DestinationTable = t.tableRefProto() +} + +func (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy, projectID string) { + for _, t := range ts { + conf.SourceTables = append(conf.SourceTables, t.tableRefProto()) + } +} + +func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery, projectID string) { + if !t.implicitTable() { + conf.DestinationTable = t.tableRefProto() + } +} + +func (t *Table) customizeReadSrc(cursor *readTableConf) { + cursor.projectID = t.ProjectID + cursor.datasetID = t.DatasetID + cursor.tableID = t.TableID +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go new file mode 100644 index 000000000..6d441cb94 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go @@ -0,0 +1,51 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +var defaultTable = &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", +} + +var defaultGCS = &GCSReference{ + uris: []string{"uri"}, +} + +var defaultQuery = &Query{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", +} + +type testService struct { + *bq.Job + + service +} + +func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) { + s.Job = job + return &Job{}, nil +} + +func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { + return &JobStatus{State: Done}, nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go new file mode 100644 index 000000000..9619eb854 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go @@ -0,0 +1,33 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +// Value stores the contents of a single cell from a BigQuery result. +type Value interface{} + +// ValueLoader stores a slice of Values representing a result row from a Read operation. +// See Iterator.Get for more information. +type ValueLoader interface { + Load(v []Value) error +} + +// ValueList converts a []Value to implement ValueLoader. +type ValueList []Value + +// Load stores a sequence of values in a ValueList. +func (vs *ValueList) Load(v []Value) error { + *vs = append(*vs, v...) + return nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go new file mode 100644 index 000000000..9e4b79b82 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go @@ -0,0 +1,266 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bigtable + +import ( + "fmt" + "regexp" + "strings" + + "golang.org/x/net/context" + "google.golang.org/cloud" + btcspb "google.golang.org/cloud/bigtable/internal/cluster_service_proto" + bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto" + "google.golang.org/cloud/internal/transport" + "google.golang.org/grpc" +) + +const adminAddr = "bigtabletableadmin.googleapis.com:443" + +// AdminClient is a client type for performing admin operations within a specific cluster. +type AdminClient struct { + conn *grpc.ClientConn + tClient bttspb.BigtableTableServiceClient + + project, zone, cluster string +} + +// NewAdminClient creates a new AdminClient for a given project, zone and cluster. +func NewAdminClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*AdminClient, error) { + o := []cloud.ClientOption{ + cloud.WithEndpoint(adminAddr), + cloud.WithScopes(AdminScope), + cloud.WithUserAgent(clientUserAgent), + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &AdminClient{ + conn: conn, + tClient: bttspb.NewBigtableTableServiceClient(conn), + + project: project, + zone: zone, + cluster: cluster, + }, nil +} + +// Close closes the AdminClient. +func (ac *AdminClient) Close() { + ac.conn.Close() +} + +func (ac *AdminClient) clusterPrefix() string { + return fmt.Sprintf("projects/%s/zones/%s/clusters/%s", ac.project, ac.zone, ac.cluster) +} + +// Tables returns a list of the tables in the cluster. +func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { + prefix := ac.clusterPrefix() + req := &bttspb.ListTablesRequest{ + Name: prefix, + } + res, err := ac.tClient.ListTables(ctx, req) + if err != nil { + return nil, err + } + names := make([]string, 0, len(res.Tables)) + for _, tbl := range res.Tables { + names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) + } + return names, nil +} + +// CreateTable creates a new table in the cluster. +// This method may return before the table's creation is complete. +func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { + prefix := ac.clusterPrefix() + req := &bttspb.CreateTableRequest{ + Name: prefix, + TableId: table, + } + _, err := ac.tClient.CreateTable(ctx, req) + if err != nil { + return err + } + return nil +} + +// CreateColumnFamily creates a new column family in a table. +func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error { + // TODO(dsymonds): Permit specifying gcexpr and any other family settings. + prefix := ac.clusterPrefix() + req := &bttspb.CreateColumnFamilyRequest{ + Name: prefix + "/tables/" + table, + ColumnFamilyId: family, + } + _, err := ac.tClient.CreateColumnFamily(ctx, req) + return err +} + +// DeleteTable deletes a table and all of its data. +func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error { + prefix := ac.clusterPrefix() + req := &bttspb.DeleteTableRequest{ + Name: prefix + "/tables/" + table, + } + _, err := ac.tClient.DeleteTable(ctx, req) + return err +} + +// DeleteColumnFamily deletes a column family in a table and all of its data. 
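+// For example (illustrative only; "mytable" and "follows" are placeholder table and family names):
+//
+//	err := ac.DeleteColumnFamily(ctx, "mytable", "follows")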
+func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error { + prefix := ac.clusterPrefix() + req := &bttspb.DeleteColumnFamilyRequest{ + Name: prefix + "/tables/" + table + "/columnFamilies/" + family, + } + _, err := ac.tClient.DeleteColumnFamily(ctx, req) + return err +} + +// TableInfo represents information about a table. +type TableInfo struct { + Families []string +} + +// TableInfo retrieves information about a table. +func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) { + prefix := ac.clusterPrefix() + req := &bttspb.GetTableRequest{ + Name: prefix + "/tables/" + table, + } + res, err := ac.tClient.GetTable(ctx, req) + if err != nil { + return nil, err + } + ti := &TableInfo{} + for fam := range res.ColumnFamilies { + ti.Families = append(ti.Families, fam) + } + return ti, nil +} + +// SetGCPolicy specifies which cells in a column family should be garbage collected. +// GC executes opportunistically in the background. +func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { + prefix := ac.clusterPrefix() + tbl, err := ac.tClient.GetTable(ctx, &bttspb.GetTableRequest{ + Name: prefix + "/tables/" + table, + }) + if err != nil { + return err + } + fam, ok := tbl.ColumnFamilies[family] + if !ok { + return fmt.Errorf("unknown column family %q", family) + } + fam.GcRule = policy.proto() + _, err = ac.tClient.UpdateColumnFamily(ctx, fam) + return err +} + +const clusterAdminAddr = "bigtableclusteradmin.googleapis.com:443" + +// ClusterAdminClient is a client type for performing admin operations on clusters. +// These operations can be substantially more dangerous than those provided by AdminClient. +type ClusterAdminClient struct { + conn *grpc.ClientConn + cClient btcspb.BigtableClusterServiceClient + + project string +} + +// NewClusterAdminClient creates a new ClusterAdminClient for a given project. +func NewClusterAdminClient(ctx context.Context, project string, opts ...cloud.ClientOption) (*ClusterAdminClient, error) { + o := []cloud.ClientOption{ + cloud.WithEndpoint(clusterAdminAddr), + cloud.WithScopes(ClusterAdminScope), + cloud.WithUserAgent(clientUserAgent), + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &ClusterAdminClient{ + conn: conn, + cClient: btcspb.NewBigtableClusterServiceClient(conn), + + project: project, + }, nil +} + +// Close closes the ClusterAdminClient. +func (cac *ClusterAdminClient) Close() { + cac.conn.Close() +} + +// ClusterInfo represents information about a cluster. +type ClusterInfo struct { + Name string // name of the cluster + Zone string // GCP zone of the cluster (e.g. "us-central1-a") + DisplayName string // display name for UIs + ServeNodes int // number of allocated serve nodes +} + +var clusterNameRegexp = regexp.MustCompile(`^projects/([^/]+)/zones/([^/]+)/clusters/([a-z][-a-z0-9]*)$`) + +// Clusters returns a list of clusters in the project. +func (cac *ClusterAdminClient) Clusters(ctx context.Context) ([]*ClusterInfo, error) { + req := &btcspb.ListClustersRequest{ + Name: "projects/" + cac.project, + } + res, err := cac.cClient.ListClusters(ctx, req) + if err != nil { + return nil, err + } + // TODO(dsymonds): Deal with failed_zones. 
+ var cis []*ClusterInfo + for _, c := range res.Clusters { + m := clusterNameRegexp.FindStringSubmatch(c.Name) + if m == nil { + return nil, fmt.Errorf("malformed cluster name %q", c.Name) + } + cis = append(cis, &ClusterInfo{ + Name: m[3], + Zone: m[2], + DisplayName: c.DisplayName, + ServeNodes: int(c.ServeNodes), + }) + } + return cis, nil +} + +/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API. + +// SetClusterSize sets the number of server nodes for this cluster. +func (ac *AdminClient) SetClusterSize(ctx context.Context, nodes int) error { + req := &btcspb.GetClusterRequest{ + Name: ac.clusterPrefix(), + } + clu, err := ac.cClient.GetCluster(ctx, req) + if err != nil { + return err + } + clu.ServeNodes = int32(nodes) + _, err = ac.cClient.UpdateCluster(ctx, clu) + return err +} + +*/ diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go new file mode 100644 index 000000000..191afc20c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go @@ -0,0 +1,59 @@ +package bigtable + +import ( + "reflect" + "sort" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/cloud" + "google.golang.org/cloud/bigtable/bttest" + "google.golang.org/grpc" +) + +func TestAdminIntegration(t *testing.T) { + srv, err := bttest.NewServer() + if err != nil { + t.Fatal(err) + } + defer srv.Close() + t.Logf("bttest.Server running on %s", srv.Addr) + + ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) + + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial: %v", err) + } + + adminClient, err := NewAdminClient(ctx, "proj", "zone", "cluster", cloud.WithBaseGRPC(conn)) + if err != nil { + t.Fatalf("NewAdminClient: %v", err) + } + defer adminClient.Close() + + list := func() []string { + tbls, err := adminClient.Tables(ctx) + if err != nil { + t.Fatalf("Fetching list of tables: %v", err) + } + sort.Strings(tbls) + return tbls + } + if err := adminClient.CreateTable(ctx, "mytable"); err != nil { + t.Fatalf("Creating table: %v", err) + } + if err := adminClient.CreateTable(ctx, "myothertable"); err != nil { + t.Fatalf("Creating table: %v", err) + } + if got, want := list(), []string{"myothertable", "mytable"}; !reflect.DeepEqual(got, want) { + t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) + } + if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil { + t.Fatalf("Deleting table: %v", err) + } + if got, want := list(), []string{"mytable"}; !reflect.DeepEqual(got, want) { + t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go new file mode 100644 index 000000000..ecdf3532f --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go @@ -0,0 +1,525 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "io" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/cloud" + btdpb "google.golang.org/cloud/bigtable/internal/data_proto" + btspb "google.golang.org/cloud/bigtable/internal/service_proto" + "google.golang.org/cloud/internal/transport" + "google.golang.org/grpc" +) + +const prodAddr = "bigtable.googleapis.com:443" + +// Client is a client for reading and writing data to tables in a cluster. +type Client struct { + conn *grpc.ClientConn + client btspb.BigtableServiceClient + + project, zone, cluster string +} + +// NewClient creates a new Client for a given project, zone and cluster. +func NewClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*Client, error) { + o := []cloud.ClientOption{ + cloud.WithEndpoint(prodAddr), + cloud.WithScopes(Scope), + cloud.WithUserAgent(clientUserAgent), + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &Client{ + conn: conn, + client: btspb.NewBigtableServiceClient(conn), + + project: project, + zone: zone, + cluster: cluster, + }, nil +} + +// Close closes the Client. +func (c *Client) Close() { + c.conn.Close() +} + +func (c *Client) fullTableName(table string) string { + return fmt.Sprintf("projects/%s/zones/%s/clusters/%s/tables/%s", c.project, c.zone, c.cluster, table) +} + +// A Table refers to a table. +type Table struct { + c *Client + table string +} + +// Open opens a table. +func (c *Client) Open(table string) *Table { + return &Table{ + c: c, + table: table, + } +} + +// TODO(dsymonds): Read method that returns a sequence of ReadItems. + +// ReadRows reads rows from a table. f is called for each row. +// If f returns false, the stream is shut down and ReadRows returns. +// f owns its argument, and f is called serially. +// +// By default, the yielded rows will contain all values in all cells. +// Use RowFilter to limit the cells returned. +func (t *Table) ReadRows(ctx context.Context, arg RowRange, f func(Row) bool, opts ...ReadOption) error { + req := &btspb.ReadRowsRequest{ + TableName: t.c.fullTableName(t.table), + Target: &btspb.ReadRowsRequest_RowRange{arg.proto()}, + } + for _, opt := range opts { + opt.set(req) + } + ctx, cancel := context.WithCancel(ctx) // for aborting the stream + stream, err := t.c.client.ReadRows(ctx, req) + if err != nil { + return err + } + cr := new(chunkReader) + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if row := cr.process(res); row != nil { + if !f(row) { + // Cancel and drain stream. + cancel() + for { + if _, err := stream.Recv(); err != nil { + return nil + } + } + } + } + } + return nil +} + +// ReadRow is a convenience implementation of a single-row reader. +// A missing row will return a zero-length map and a nil error. +func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { + var r Row + err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { + r = rr + return true + }, opts...) + return r, err +} + +type chunkReader struct { + partial map[string]Row // incomplete rows +} + +// process handles a single btspb.ReadRowsResponse. +// If it completes a row, that row is returned. 
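(Reviewer's orientation note: the read path above is callback-driven. ReadRows streams rows into f and, as soon as f returns false, cancels and drains the stream; ReadRow is a single-row wrapper over it. A hedged sketch of typical use, with invented project/cluster/table/row names:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/net/context"
        "google.golang.org/cloud/bigtable"
    )

    func main() {
        ctx := context.Background()
        // Placeholder names, for illustration only.
        client, err := bigtable.NewClient(ctx, "my-project", "us-central1-b", "my-cluster")
        if err != nil {
            log.Fatalf("NewClient: %v", err)
        }
        defer client.Close()
        tbl := client.Open("events")

        // Single-row convenience wrapper; a missing row is an empty Row with nil error.
        row, err := tbl.ReadRow(ctx, "event#42")
        if err != nil {
            log.Fatalf("ReadRow: %v", err)
        }
        for fam, items := range row {
            for _, it := range items {
                fmt.Printf("%s %s @%d = %q\n", fam, it.Column, it.Timestamp, it.Value)
            }
        }

        // Streaming scan; returning false cancels and drains the stream.
        n := 0
        err = tbl.ReadRows(ctx, bigtable.PrefixRange("event#"), func(r bigtable.Row) bool {
            n++
            return n < 10 // stop early after ten rows
        })
        if err != nil {
            log.Fatalf("ReadRows: %v", err)
        }
    }
)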
+func (cr *chunkReader) process(rrr *btspb.ReadRowsResponse) Row { + if cr.partial == nil { + cr.partial = make(map[string]Row) + } + row := string(rrr.RowKey) + r := cr.partial[row] + if r == nil { + r = make(Row) + cr.partial[row] = r + } + for _, chunk := range rrr.Chunks { + switch c := chunk.Chunk.(type) { + case *btspb.ReadRowsResponse_Chunk_ResetRow: + r = make(Row) + cr.partial[row] = r + continue + case *btspb.ReadRowsResponse_Chunk_CommitRow: + delete(cr.partial, row) + return r // assume that this is the last chunk + case *btspb.ReadRowsResponse_Chunk_RowContents: + decodeFamilyProto(r, row, c.RowContents) + } + } + return nil +} + +// decodeFamilyProto adds the cell data from f to the given row. +func decodeFamilyProto(r Row, row string, f *btdpb.Family) { + fam := f.Name // does not have colon + for _, col := range f.Columns { + for _, cell := range col.Cells { + ri := ReadItem{ + Row: row, + Column: fmt.Sprintf("%s:%s", fam, col.Qualifier), + Timestamp: Timestamp(cell.TimestampMicros), + Value: cell.Value, + } + r[fam] = append(r[fam], ri) + } + } +} + +// A RowRange is used to describe the rows to be read. +// A RowRange is a half-open interval [Start, Limit) encompassing +// all the rows with keys at least as large as Start, and less than Limit. +// (Bigtable string comparison is the same as Go's.) +// A RowRange can be unbounded, encompassing all keys at least as large as Start. +type RowRange struct { + start string + limit string +} + +// NewRange returns the new RowRange [begin, end). +func NewRange(begin, end string) RowRange { + return RowRange{ + start: begin, + limit: end, + } +} + +// Unbounded tests whether a RowRange is unbounded. +func (r RowRange) Unbounded() bool { + return r.limit == "" +} + +// Contains says whether the RowRange contains the key. +func (r RowRange) Contains(row string) bool { + return r.start <= row && (r.limit == "" || r.limit > row) +} + +// String provides a printable description of a RowRange. +func (r RowRange) String() string { + a := strconv.Quote(r.start) + if r.Unbounded() { + return fmt.Sprintf("[%s,∞)", a) + } + return fmt.Sprintf("[%s,%q)", a, r.limit) +} + +func (r RowRange) proto() *btdpb.RowRange { + if r.Unbounded() { + return &btdpb.RowRange{StartKey: []byte(r.start)} + } + return &btdpb.RowRange{ + StartKey: []byte(r.start), + EndKey: []byte(r.limit), + } +} + +// SingleRow returns a RowRange for reading a single row. +func SingleRow(row string) RowRange { + return RowRange{ + start: row, + limit: row + "\x00", + } +} + +// PrefixRange returns a RowRange consisting of all keys starting with the prefix. +func PrefixRange(prefix string) RowRange { + return RowRange{ + start: prefix, + limit: prefixSuccessor(prefix), + } +} + +// InfiniteRange returns the RowRange consisting of all keys at least as +// large as start. +func InfiniteRange(start string) RowRange { + return RowRange{ + start: start, + limit: "", + } +} + +// prefixSuccessor returns the lexically smallest string greater than the +// prefix, if it exists, or "" otherwise. In either case, it is the string +// needed for the Limit of a RowRange. +func prefixSuccessor(prefix string) string { + if prefix == "" { + return "" // infinite range + } + n := len(prefix) + for n--; n >= 0 && prefix[n] == '\xff'; n-- { + } + if n == -1 { + return "" + } + ans := []byte(prefix[:n]) + ans = append(ans, prefix[n]+1) + return string(ans) +} + +// A ReadOption is an optional argument to ReadRows. 
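(Reviewer's orientation note: the prefixSuccessor arithmetic is what keeps PrefixRange correct for prefixes ending in 0xff bytes. A small worked illustration; the expected outputs in the comments follow directly from the String, Contains and prefixSuccessor implementations above:

    package main

    import (
        "fmt"

        "google.golang.org/cloud/bigtable"
    )

    func main() {
        // PrefixRange("ab") scans ["ab", "ac"): "ac" is the smallest key
        // that is not prefixed by "ab".
        r := bigtable.PrefixRange("ab")
        fmt.Println(r)                   // ["ab","ac")
        fmt.Println(r.Contains("abzzz")) // true
        fmt.Println(r.Contains("ac"))    // false

        // A prefix ending in 0xff has no same-length successor, so the limit
        // carries into the shorter prefix: ["ab\xff", "ac").
        fmt.Println(bigtable.PrefixRange("ab\xff")) // ["ab\xff","ac")

        // An all-0xff prefix has no successor at all; the range is unbounded.
        fmt.Println(bigtable.PrefixRange("\xff").Unbounded()) // true
    }
)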
+type ReadOption interface { + set(req *btspb.ReadRowsRequest) +} + +// RowFilter returns a ReadOption that applies f to the contents of read rows. +func RowFilter(f Filter) ReadOption { return rowFilter{f} } + +type rowFilter struct{ f Filter } + +func (rf rowFilter) set(req *btspb.ReadRowsRequest) { req.Filter = rf.f.proto() } + +// LimitRows returns a ReadOption that will limit the number of rows to be read. +func LimitRows(limit int64) ReadOption { return limitRows{limit} } + +type limitRows struct{ limit int64 } + +func (lr limitRows) set(req *btspb.ReadRowsRequest) { req.NumRowsLimit = lr.limit } + +// A Row is returned by ReadRow. The map is keyed by column family (the prefix +// of the column name before the colon). The values are the returned ReadItems +// for that column family in the order returned by Read. +type Row map[string][]ReadItem + +// Key returns the row's key, or "" if the row is empty. +func (r Row) Key() string { + for _, items := range r { + if len(items) > 0 { + return items[0].Row + } + } + return "" +} + +// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. +type ReadItem struct { + Row, Column string + Timestamp Timestamp + Value []byte +} + +// Apply applies a Mutation to a specific row. +func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + if m.cond == nil { + req := &btspb.MutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Mutations: m.ops, + } + res, err := t.c.client.MutateRow(ctx, req) + if err == nil { + after(res) + } + return err + } + req := &btspb.CheckAndMutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + PredicateFilter: m.cond.proto(), + } + if m.mtrue != nil { + req.TrueMutations = m.mtrue.ops + } + if m.mfalse != nil { + req.FalseMutations = m.mfalse.ops + } + res, err := t.c.client.CheckAndMutateRow(ctx, req) + if err == nil { + after(res) + } + return err +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption interface { + after(res proto.Message) +} + +type applyAfterFunc func(res proto.Message) + +func (a applyAfterFunc) after(res proto.Message) { a(res) } + +// GetCondMutationResult returns an ApplyOption that reports whether the conditional +// mutation's condition matched. +func GetCondMutationResult(matched *bool) ApplyOption { + return applyAfterFunc(func(res proto.Message) { + if res, ok := res.(*btspb.CheckAndMutateRowResponse); ok { + *matched = res.PredicateMatched + } + }) +} + +// Mutation represents a set of changes for a single row of a table. +type Mutation struct { + ops []*btdpb.Mutation + + // for conditional mutations + cond Filter + mtrue, mfalse *Mutation +} + +// NewMutation returns a new mutation. +func NewMutation() *Mutation { + return new(Mutation) +} + +// NewCondMutation returns a conditional mutation. +// The given row filter determines which mutation is applied: +// If the filter matches any cell in the row, mtrue is applied; +// otherwise, mfalse is applied. +// Either given mutation may be nil. +func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { + return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} +} + +// Set sets a value in a specified column, with the given timestamp. +// The timestamp will be truncated to millisecond resolution. +// A timestamp of ServerTime means to use the server timestamp. 
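(Reviewer's orientation note: Apply's two branches above, plain versus checked mutation, are selected entirely by whether the Mutation came from NewCondMutation. A sketch of the conditional form using filters from this package's filter.go, which is not shown in this hunk; the family, column and row names are invented:

    package main

    import (
        "log"

        "golang.org/x/net/context"
        "google.golang.org/cloud/bigtable"
    )

    func main() {
        ctx := context.Background()
        client, err := bigtable.NewClient(ctx, "my-project", "us-central1-b", "my-cluster")
        if err != nil {
            log.Fatalf("NewClient: %v", err)
        }
        defer client.Close()
        tbl := client.Open("events")

        // Mark the row archived only if it has a non-empty "state:active" cell.
        cond := bigtable.ChainFilters(bigtable.ColumnFilter("active"), bigtable.ValueFilter("."))
        mutTrue := bigtable.NewMutation()
        mutTrue.Set("state", "archived", bigtable.Now(), []byte("1"))
        mut := bigtable.NewCondMutation(cond, mutTrue, nil)

        var matched bool
        if err := tbl.Apply(ctx, "event#42", mut, bigtable.GetCondMutationResult(&matched)); err != nil {
            log.Fatalf("Apply: %v", err)
        }
        log.Printf("condition matched: %v", matched)
    }
)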
+func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { + if ts != ServerTime { + // Truncate to millisecond resolution, since that's the default table config. + // TODO(dsymonds): Provide a way to override this behaviour. + ts -= ts % 1000 + } + m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_SetCell_{&btdpb.Mutation_SetCell{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimestampMicros: int64(ts), + Value: value, + }}}) +} + +// DeleteCellsInColumn will delete all the cells whose columns are family:column. +func (m *Mutation) DeleteCellsInColumn(family, column string) { + m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromColumn_{&btdpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + }}}) +} + +// DeleteTimestampRange deletes all cells whose columns are family:column +// and whose timestamps are in the half-open interval [start, end). +// If end is zero, it will be interpreted as infinity. +func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { + m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromColumn_{&btdpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimeRange: &btdpb.TimestampRange{ + StartTimestampMicros: int64(start), + EndTimestampMicros: int64(end), + }, + }}}) +} + +// DeleteCellsInFamily will delete all the cells whose columns are family:*. +func (m *Mutation) DeleteCellsInFamily(family string) { + m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromFamily_{&btdpb.Mutation_DeleteFromFamily{ + FamilyName: family, + }}}) +} + +// DeleteRow deletes the entire row. +func (m *Mutation) DeleteRow() { + m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromRow_{&btdpb.Mutation_DeleteFromRow{}}}) +} + +// Timestamp is in units of microseconds since 1 January 1970. +type Timestamp int64 + +// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. +// It indicates that the server's timestamp should be used. +const ServerTime Timestamp = -1 + +// Time converts a time.Time into a Timestamp. +func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } + +// Now returns the Timestamp representation of the current time on the client. +func Now() Timestamp { return Time(time.Now()) } + +// Time converts a Timestamp into a time.Time. +func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } + +// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. +// It returns the newly written cells. +func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { + req := &btspb.ReadModifyWriteRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Rules: m.ops, + } + res, err := t.c.client.ReadModifyWriteRow(ctx, req) + if err != nil { + return nil, err + } + r := make(Row) + for _, fam := range res.Families { // res is *btdpb.Row, fam is *btdpb.Family + decodeFamilyProto(r, row, fam) + } + return r, nil +} + +// ReadModifyWrite represents a set of operations on a single row of a table. +// It is like Mutation but for non-idempotent changes. +// When applied, these operations operate on the latest values of the row's cells, +// and result in a new value being written to the relevant cell with a timestamp +// that is max(existing timestamp, current server time). 
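(Reviewer's orientation note: ApplyReadModifyWrite above pairs with the ReadModifyWrite type whose definition continues below; the increment rule is the classic counter. A sketch under the same invented placeholder names as the earlier notes:

    package main

    import (
        "log"

        "golang.org/x/net/context"
        "google.golang.org/cloud/bigtable"
    )

    func main() {
        ctx := context.Background()
        client, err := bigtable.NewClient(ctx, "my-project", "us-central1-b", "my-cluster")
        if err != nil {
            log.Fatalf("NewClient: %v", err)
        }
        defer client.Close()
        tbl := client.Open("events")

        rmw := bigtable.NewReadModifyWrite()
        rmw.AppendValue("counter", "log", []byte("x"))
        rmw.Increment("counter", "likes", 1) // existing cell must be unset or exactly 8 bytes
        row, err := tbl.ApplyReadModifyWrite(ctx, "event#42", rmw)
        if err != nil {
            log.Fatalf("ApplyReadModifyWrite: %v", err)
        }
        for _, ri := range row["counter"] { // the newly written cells come back
            log.Printf("%s = %x", ri.Column, ri.Value)
        }
    }
)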
+// +// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will +// be executed serially by the server. +type ReadModifyWrite struct { + ops []*btdpb.ReadModifyWriteRule +} + +// NewReadModifyWrite returns a new ReadModifyWrite. +func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } + +// AppendValue appends a value to a specific cell's value. +// If the cell is unset, it will be treated as an empty value. +func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { + m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btdpb.ReadModifyWriteRule_AppendValue{v}, + }) +} + +// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, +// and adds a value to it. If the cell is unset, it will be treated as zero. +// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite +// operation will fail. +func (m *ReadModifyWrite) Increment(family, column string, delta int64) { + m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btdpb.ReadModifyWriteRule_IncrementAmount{delta}, + }) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go new file mode 100644 index 000000000..5363fafc1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go @@ -0,0 +1,579 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bigtable + +import ( + "flag" + "fmt" + "math/rand" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/cloud" + "google.golang.org/cloud/bigtable/bttest" + btspb "google.golang.org/cloud/bigtable/internal/service_proto" + "google.golang.org/grpc" +) + +func dataChunk(fam, col string, ts int64, data string) string { + return fmt.Sprintf("chunks:<row_contents:<name:%q columns:<qualifier:%q cells:<timestamp_micros:%d value:%q>>>>", fam, col, ts, data) +} + +func commit() string { return "chunks:<commit_row:true>" } + +var chunkTests = []struct { + desc string + chunks []string // sequence of ReadRowsResponse protos in text format + want map[string]Row +}{ + { + desc: "single row single chunk", + chunks: []string{ + `row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data") + commit(), + }, + want: map[string]Row{ + "row1": Row{ + "fam": []ReadItem{{ + Row: "row1", + Column: "fam:col1", + Timestamp: 1428382701000000, + Value: []byte("data"), + }}, + }, + }, + }, + { + desc: "single row multiple chunks", + chunks: []string{ + `row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data"), + `row_key: "row1" ` + dataChunk("fam", "col2", 1428382702000000, "more data"), + `row_key: "row1" ` + commit(), + }, + want: map[string]Row{ + "row1": Row{ + "fam": []ReadItem{ + { + Row: "row1", + Column: "fam:col1", + Timestamp: 1428382701000000, + Value: []byte("data"), + }, + { + Row: "row1", + Column: "fam:col2", + Timestamp: 1428382702000000, + Value: []byte("more data"), + }, + }, + }, + }, + }, + // TODO(dsymonds): More test cases, including + // - multiple rows + // - reset_row +} + +func TestChunkReader(t *testing.T) { + for _, tc := range chunkTests { + cr := new(chunkReader) + got := make(map[string]Row) + for i, txt := range tc.chunks { + rrr := new(btspb.ReadRowsResponse) + if err := proto.UnmarshalText(txt, rrr); err != nil { + t.Fatalf("%s: internal error: bad #%d test text: %v", tc.desc, i, err) + } + if row := cr.process(rrr); row != nil { + got[row.Key()] = row + } + } + // TODO(dsymonds): check for partial rows?
+ if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s: processed response mismatch.\n got %+v\nwant %+v", tc.desc, got, tc.want) + } + } +} + +func TestPrefix(t *testing.T) { + tests := []struct { + prefix, succ string + }{ + {"", ""}, + {"\xff", ""}, // when used, "" means Infinity + {"x\xff", "y"}, + {"\xfe", "\xff"}, + } + for _, tc := range tests { + got := prefixSuccessor(tc.prefix) + if got != tc.succ { + t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ) + continue + } + r := PrefixRange(tc.prefix) + if tc.succ == "" && r.limit != "" { + t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit) + } + if tc.succ != "" && r.limit != tc.succ { + t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ) + } + } +} + +var useProd = flag.String("use_prod", "", `if set to "proj,zone,cluster,table", run integration test against production`) + +func TestClientIntegration(t *testing.T) { + start := time.Now() + lastCheckpoint := start + checkpoint := func(s string) { + n := time.Now() + t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) + lastCheckpoint = n + } + + proj, zone, cluster, table := "proj", "zone", "cluster", "mytable" + var clientOpts []cloud.ClientOption + timeout := 10 * time.Second + if *useProd == "" { + srv, err := bttest.NewServer() + if err != nil { + t.Fatal(err) + } + defer srv.Close() + t.Logf("bttest.Server running on %s", srv.Addr) + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial: %v", err) + } + clientOpts = []cloud.ClientOption{cloud.WithBaseGRPC(conn)} + } else { + t.Logf("Running test against production") + a := strings.Split(*useProd, ",") + proj, zone, cluster, table = a[0], a[1], a[2], a[3] + timeout = 5 * time.Minute + } + + ctx, _ := context.WithTimeout(context.Background(), timeout) + + client, err := NewClient(ctx, proj, zone, cluster, clientOpts...) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer client.Close() + checkpoint("dialed Client") + + adminClient, err := NewAdminClient(ctx, proj, zone, cluster, clientOpts...) + if err != nil { + t.Fatalf("NewAdminClient: %v", err) + } + defer adminClient.Close() + checkpoint("dialed AdminClient") + + // Delete the table at the end of the test. + // Do this even before creating the table so that if this is running + // against production and CreateTable fails there's a chance of cleaning it up. + defer adminClient.DeleteTable(ctx, table) + + if err := adminClient.CreateTable(ctx, table); err != nil { + t.Fatalf("Creating table: %v", err) + } + checkpoint("created table") + if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + checkpoint(`created "follows" column family`) + + tbl := client.Open(table) + + // Insert some data. + initialData := map[string][]string{ + "wmckinley": []string{"tjefferson"}, + "gwashington": []string{"jadams"}, + "tjefferson": []string{"gwashington", "jadams"}, // wmckinley set conditionally below + "jadams": []string{"gwashington", "tjefferson"}, + } + for row, ss := range initialData { + mut := NewMutation() + for _, name := range ss { + mut.Set("follows", name, 0, []byte("1")) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Mutating row %q: %v", row, err) + } + } + checkpoint("inserted initial data") + + // Do a conditional mutation with a complex filter. 
+ mutTrue := NewMutation() + mutTrue.Set("follows", "wmckinley", 0, []byte("1")) + filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter(".")) + mut := NewCondMutation(filter, mutTrue, nil) + if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { + t.Errorf("Conditionally mutating row: %v", err) + } + // Do a second condition mutation with a filter that does not match, + // and thus no changes should be made. + mutTrue = NewMutation() + mutTrue.DeleteRow() + filter = ColumnFilter("snoop.dogg") + mut = NewCondMutation(filter, mutTrue, nil) + if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { + t.Errorf("Conditionally mutating row: %v", err) + } + checkpoint("did two conditional mutations") + + // Fetch a row. + row, err := tbl.ReadRow(ctx, "jadams") + if err != nil { + t.Fatalf("Reading a row: %v", err) + } + wantRow := Row{ + "follows": []ReadItem{ + {Row: "jadams", Column: "follows:gwashington", Value: []byte("1")}, + {Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")}, + }, + } + for _, ris := range row { + sort.Sort(byColumn(ris)) + } + if !reflect.DeepEqual(row, wantRow) { + t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) + } + checkpoint("tested ReadRow") + + // Do a bunch of reads with filters. + readTests := []struct { + desc string + rr RowRange + filter Filter // may be nil + + // We do the read, grab all the cells, turn them into "--", + // sort that list, and join with a comma. + want string + }{ + { + desc: "read all, unfiltered", + rr: RowRange{}, + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InfiniteRange, unfiltered", + rr: InfiniteRange("tjefferson"), + want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with NewRange, unfiltered", + rr: NewRange("gargamel", "hubbard"), + want: "gwashington-jadams-1", + }, + { + desc: "read with PrefixRange, unfiltered", + rr: PrefixRange("jad"), + want: "jadams-gwashington-1,jadams-tjefferson-1", + }, + { + desc: "read with SingleRow, unfiltered", + rr: SingleRow("wmckinley"), + want: "wmckinley-tjefferson-1", + }, + { + desc: "read all, with ColumnFilter", + rr: RowRange{}, + filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" + want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + } + for _, tc := range readTests { + var opts []ReadOption + if tc.filter != nil { + opts = append(opts, RowFilter(tc.filter)) + } + var elt []string + err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + // Use the column qualifier only to make the test data briefer. + col := ri.Column[strings.Index(ri.Column, ":")+1:] + x := fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value) + elt = append(elt, x) + } + } + return true + }, opts...) + if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + sort.Strings(elt) + if got := strings.Join(elt, ","); got != tc.want { + t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) + } + } + checkpoint("tested ReadRows in a few ways") + + // Do a scan and stop part way through. + // Verify that the ReadRows callback doesn't keep running. 
+ stopped := false + err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool { + if r.Key() < "h" { + return true + } + if !stopped { + stopped = true + return false + } + t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key()) + return false + }) + if err != nil { + t.Errorf("Partial ReadRows: %v", err) + } + checkpoint("did partial ReadRows test") + + // Delete a row and check it goes away. + mut = NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, "wmckinley", mut); err != nil { + t.Errorf("Apply DeleteRow: %v", err) + } + row, err = tbl.ReadRow(ctx, "wmckinley") + if err != nil { + t.Fatalf("Reading a row after DeleteRow: %v", err) + } + if len(row) != 0 { + t.Fatalf("Read non-zero row after DeleteRow: %v", row) + } + checkpoint("exercised DeleteRow") + + // Check ReadModifyWrite. + + if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + + appendRMW := func(b []byte) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.AppendValue("counter", "likes", b) + return rmw + } + incRMW := func(n int64) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.Increment("counter", "likes", n) + return rmw + } + rmwSeq := []struct { + desc string + rmw *ReadModifyWrite + want []byte + }{ + { + desc: "append #1", + rmw: appendRMW([]byte{0, 0, 0}), + want: []byte{0, 0, 0}, + }, + { + desc: "append #2", + rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 + want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, + }, + { + desc: "increment", + rmw: incRMW(8), + want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, + }, + } + for _, step := range rmwSeq { + row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) + if err != nil { + t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) + } + clearTimestamps(row) + wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} + if !reflect.DeepEqual(row, wantRow) { + t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) + } + } + checkpoint("tested ReadModifyWrite") + + // Test arbitrary timestamps more thoroughly. + if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + const numVersions = 4 + mut = NewMutation() + for i := 0; i < numVersions; i++ { + // Timestamps are used in thousands because the server + // only permits that granularity. + mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) + } + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err := tbl.ReadRow(ctx, "testrow") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + // These should be returned in descending timestamp order. + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) + } + // Do the same read, but filter to the latest two versions. 
+ r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) + } + // Delete the cell with timestamp 2000 and repeat the last read, + // checking that we get ts 3000 and ts 1000. + mut = NewMutation() + mut.DeleteTimestampRange("ts", "col", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow) + } + checkpoint("tested multiple versions in a cell") + + // Do highly concurrent reads/writes. + // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. + const maxConcurrency = 100 + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + switch r := rand.Intn(100); { // r ∈ [0,100) + case 0 <= r && r < 30: + // Do a read. + _, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1))) + if err != nil { + t.Errorf("Concurrent read: %v", err) + } + case 30 <= r && r < 100: + // Do a write. + mut := NewMutation() + mut.Set("ts", "col", 0, []byte("data")) + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Errorf("Concurrent write: %v", err) + } + } + }() + } + wg.Wait() + checkpoint("tested high concurrency") + + // Large reads, writes and scans. + bigBytes := make([]byte, 15<<20) // 15 MB is large + nonsense := []byte("lorem ipsum dolor sit amet, ") + fill(bigBytes, nonsense) + mut = NewMutation() + mut.Set("ts", "col", 0, bigBytes) + if err := tbl.Apply(ctx, "bigrow", mut); err != nil { + t.Errorf("Big write: %v", err) + } + r, err = tbl.ReadRow(ctx, "bigrow") + if err != nil { + t.Errorf("Big read: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "bigrow", Column: "ts:col", Value: bigBytes}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Big read returned incorrect bytes: %v", r) + } + // Now write 1000 rows, each with 82 KB values, then scan them all. + medBytes := make([]byte, 82<<10) + fill(medBytes, nonsense) + sem := make(chan int, 50) // do up to 50 mutations at a time. 
+ for i := 0; i < 1000; i++ { + mut := NewMutation() + mut.Set("ts", "big-scan", 0, medBytes) + row := fmt.Sprintf("row-%d", i) + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + sem <- 1 + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Preparing large scan: %v", err) + } + }() + } + wg.Wait() + n := 0 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + n += len(ri.Value) + } + } + return true + }, RowFilter(ColumnFilter("big-scan"))) + if err != nil { + t.Errorf("Doing large scan: %v", err) + } + if want := 1000 * len(medBytes); n != want { + t.Errorf("Large scan returned %d bytes, want %d", n, want) + } + checkpoint("tested big read/write/scan") +} + +func fill(b, sub []byte) { + for len(b) > len(sub) { + n := copy(b, sub) + b = b[n:] + } +} + +type byColumn []ReadItem + +func (b byColumn) Len() int { return len(b) } +func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } + +func clearTimestamps(r Row) { + for _, ris := range r { + for i := range ris { + ris[i].Timestamp = 0 + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go new file mode 100644 index 000000000..4a3d369e9 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go @@ -0,0 +1,641 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bttest contains test helpers for working with the bigtable package. + +To use a Server, create it, and then connect to it with no security: +(The project/zone/cluster values are ignored.) + srv, err := bttest.NewServer() + ... + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + ... + client, err := bigtable.NewClient(ctx, proj, zone, cluster, + bigtable.WithBaseGRPC(conn)) + ... +*/ +package bttest + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "regexp" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + btdpb "google.golang.org/cloud/bigtable/internal/data_proto" + emptypb "google.golang.org/cloud/bigtable/internal/empty" + btspb "google.golang.org/cloud/bigtable/internal/service_proto" + bttdpb "google.golang.org/cloud/bigtable/internal/table_data_proto" + bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto" + "google.golang.org/grpc" +) + +// Server is an in-memory Cloud Bigtable fake. +// It is unauthenticated, and only a rough approximation. +type Server struct { + Addr string + + l net.Listener + srv *grpc.Server + s *server +} + +// server is the real implementation of the fake. +// It is a separate and unexported type so the API won't be cluttered with +// methods that are only relevant to the fake's implementation. 
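(Reviewer's orientation note: the package comment above shows the client side; in the _test files of this same diff, one grpc.ClientConn to the fake is shared by both the data and admin clients via cloud.WithBaseGRPC. A condensed sketch of that pattern; the fake ignores the project/zone/cluster strings, and the table name is arbitrary:

    package main

    import (
        "log"

        "golang.org/x/net/context"
        "google.golang.org/cloud"
        "google.golang.org/cloud/bigtable"
        "google.golang.org/cloud/bigtable/bttest"
        "google.golang.org/grpc"
    )

    func main() {
        srv, err := bttest.NewServer()
        if err != nil {
            log.Fatal(err)
        }
        defer srv.Close()

        conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
        if err != nil {
            log.Fatalf("grpc.Dial: %v", err)
        }

        ctx := context.Background()
        ac, err := bigtable.NewAdminClient(ctx, "proj", "zone", "cluster", cloud.WithBaseGRPC(conn))
        if err != nil {
            log.Fatal(err)
        }
        defer ac.Close()
        if err := ac.CreateTable(ctx, "t"); err != nil {
            log.Fatal(err)
        }

        client, err := bigtable.NewClient(ctx, "proj", "zone", "cluster", cloud.WithBaseGRPC(conn))
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()
        _ = client.Open("t") // ready for reads and writes against the fake
    }
)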
+type server struct { + mu sync.Mutex + tables map[string]*table // keyed by fully qualified name + + // Any unimplemented methods will cause a panic. + bttspb.BigtableTableServiceServer + btspb.BigtableServiceServer +} + +// NewServer creates a new Server. The Server will be listening for gRPC connections +// at the address named by the Addr field, without TLS. +func NewServer() (*Server, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + + s := &Server{ + Addr: l.Addr().String(), + l: l, + srv: grpc.NewServer(), + s: &server{ + tables: make(map[string]*table), + }, + } + bttspb.RegisterBigtableTableServiceServer(s.srv, s.s) + btspb.RegisterBigtableServiceServer(s.srv, s.s) + + go s.srv.Serve(s.l) + + return s, nil +} + +// Close shuts down the server. +func (s *Server) Close() { + s.srv.Stop() + s.l.Close() +} + +func (s *server) CreateTable(ctx context.Context, req *bttspb.CreateTableRequest) (*bttdpb.Table, error) { + tbl := req.Name + "/tables/" + req.TableId + + s.mu.Lock() + if _, ok := s.tables[tbl]; ok { + s.mu.Unlock() + return nil, fmt.Errorf("table %q already exists", tbl) + } + s.tables[tbl] = newTable() + s.mu.Unlock() + + return &bttdpb.Table{Name: tbl}, nil +} + +func (s *server) ListTables(ctx context.Context, req *bttspb.ListTablesRequest) (*bttspb.ListTablesResponse, error) { + res := &bttspb.ListTablesResponse{} + prefix := req.Name + "/tables/" + + s.mu.Lock() + for tbl := range s.tables { + if strings.HasPrefix(tbl, prefix) { + res.Tables = append(res.Tables, &bttdpb.Table{Name: tbl}) + } + } + s.mu.Unlock() + + return res, nil +} + +func (s *server) DeleteTable(ctx context.Context, req *bttspb.DeleteTableRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + if _, ok := s.tables[req.Name]; !ok { + return nil, fmt.Errorf("no such table %q", req.Name) + } + delete(s.tables, req.Name) + return &emptypb.Empty{}, nil +} + +func (s *server) CreateColumnFamily(ctx context.Context, req *bttspb.CreateColumnFamilyRequest) (*bttdpb.ColumnFamily, error) { + s.mu.Lock() + tbl, ok := s.tables[req.Name] + s.mu.Unlock() + if !ok { + return nil, fmt.Errorf("no such table %q", req.Name) + } + + // Check it is unique and record it. + fam := req.ColumnFamilyId + tbl.mu.Lock() + defer tbl.mu.Unlock() + if _, ok := tbl.families[fam]; ok { + return nil, fmt.Errorf("family %q already exists", fam) + } + tbl.families[fam] = true + return &bttdpb.ColumnFamily{ + Name: req.Name + "/columnFamilies/" + fam, + }, nil +} + +func (s *server) ReadRows(req *btspb.ReadRowsRequest, stream btspb.BigtableService_ReadRowsServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return fmt.Errorf("no such table %q", req.TableName) + } + + var start, end string // half-open interval + switch targ := req.Target.(type) { + case *btspb.ReadRowsRequest_RowRange: + start, end = string(targ.RowRange.StartKey), string(targ.RowRange.EndKey) + case *btspb.ReadRowsRequest_RowKey: + // A single row read is simply an edge case. + start = string(targ.RowKey) + end = start + "\x00" + default: + return fmt.Errorf("unknown ReadRowsRequest.Target oneof %T", targ) + } + + // Get rows to stream back. 
+ tbl.mu.RLock() + si, ei := 0, len(tbl.rows) // half-open interval + if start != "" { + si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start }) + } + if end != "" { + ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end }) + } + if si >= ei { + tbl.mu.RUnlock() + return nil + } + rows := make([]*row, ei-si) + copy(rows, tbl.rows[si:ei]) + tbl.mu.RUnlock() + + for _, r := range rows { + if err := streamRow(stream, r, req.Filter); err != nil { + return err + } + } + + return nil +} + +func streamRow(stream btspb.BigtableService_ReadRowsServer, r *row, f *btdpb.RowFilter) error { + r.mu.Lock() + nr := r.copy() + r.mu.Unlock() + r = nr + + filterRow(f, r) + + rrr := &btspb.ReadRowsResponse{ + RowKey: []byte(r.key), + } + for col, cells := range r.cells { + i := strings.Index(col, ":") // guaranteed to exist + fam, col := col[:i], col[i+1:] + if len(cells) == 0 { + continue + } + // TODO(dsymonds): Apply transformers. + colm := &btdpb.Column{ + Qualifier: []byte(col), + // Cells is populated below. + } + for _, cell := range cells { + colm.Cells = append(colm.Cells, &btdpb.Cell{ + TimestampMicros: cell.ts, + Value: cell.value, + }) + } + rrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{ + Chunk: &btspb.ReadRowsResponse_Chunk_RowContents{&btdpb.Family{ + Name: fam, + Columns: []*btdpb.Column{colm}, + }}, + }) + } + rrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{Chunk: &btspb.ReadRowsResponse_Chunk_CommitRow{true}}) + return stream.Send(rrr) +} + +// filterRow modifies a row with the given filter. +func filterRow(f *btdpb.RowFilter, r *row) { + if f == nil { + return + } + // Handle filters that apply beyond just including/excluding cells. + switch f := f.Filter.(type) { + case *btdpb.RowFilter_Chain_: + for _, sub := range f.Chain.Filters { + filterRow(sub, r) + } + return + case *btdpb.RowFilter_Interleave_: + srs := make([]*row, 0, len(f.Interleave.Filters)) + for _, sub := range f.Interleave.Filters { + sr := r.copy() + filterRow(sub, sr) + srs = append(srs, sr) + } + // merge + // TODO(dsymonds): is this correct? + r.cells = make(map[string][]cell) + for _, sr := range srs { + for col, cs := range sr.cells { + r.cells[col] = append(r.cells[col], cs...) + } + } + for _, cs := range r.cells { + sort.Sort(byDescTS(cs)) + } + return + case *btdpb.RowFilter_CellsPerColumnLimitFilter: + lim := int(f.CellsPerColumnLimitFilter) + for col, cs := range r.cells { + if len(cs) > lim { + r.cells[col] = cs[:lim] + } + } + return + } + + // Any other case, operate on a per-cell basis. + for key, cs := range r.cells { + i := strings.Index(key, ":") // guaranteed to exist + fam, col := key[:i], key[i+1:] + r.cells[key] = filterCells(f, fam, col, cs) + } +} + +func filterCells(f *btdpb.RowFilter, fam, col string, cs []cell) []cell { + var ret []cell + for _, cell := range cs { + if includeCell(f, fam, col, cell) { + ret = append(ret, cell) + } + } + return ret +} + +func includeCell(f *btdpb.RowFilter, fam, col string, cell cell) bool { + if f == nil { + return true + } + // TODO(dsymonds): Implement many more filters. 
+ switch f := f.Filter.(type) { + default: + log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f) + return true + case *btdpb.RowFilter_ColumnQualifierRegexFilter: + pat := string(f.ColumnQualifierRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(col) + case *btdpb.RowFilter_ValueRegexFilter: + pat := string(f.ValueRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.Match(cell.value) + } +} + +func (s *server) MutateRow(ctx context.Context, req *btspb.MutateRowRequest) (*emptypb.Empty, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, fmt.Errorf("no such table %q", req.TableName) + } + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + if err := applyMutations(tbl, r, req.Mutations); err != nil { + return nil, err + } + return &emptypb.Empty{}, nil +} + +func (s *server) CheckAndMutateRow(ctx context.Context, req *btspb.CheckAndMutateRowRequest) (*btspb.CheckAndMutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, fmt.Errorf("no such table %q", req.TableName) + } + + res := &btspb.CheckAndMutateRowResponse{} + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + // Figure out which mutation to apply. + whichMut := false + if req.PredicateFilter == nil { + // Use true_mutations iff row contains any cells. + whichMut = len(r.cells) > 0 + } else { + // Use true_mutations iff any cells in the row match the filter. + // TODO(dsymonds): This could be cheaper. + nr := r.copy() + filterRow(req.PredicateFilter, nr) + for _, cs := range nr.cells { + if len(cs) > 0 { + whichMut = true + break + } + } + // TODO(dsymonds): Figure out if this is supposed to be set + // even when there's no predicate filter. + res.PredicateMatched = whichMut + } + muts := req.FalseMutations + if whichMut { + muts = req.TrueMutations + } + + if err := applyMutations(tbl, r, muts); err != nil { + return nil, err + } + return res, nil +} + +// applyMutations applies a sequence of mutations to a row. +// It assumes r.mu is locked. 
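(Reviewer's orientation note: applyMutations below, together with table.validTimestamp, enforces the same millisecond granularity the client already applies, since Mutation.Set truncates any non-ServerTime timestamp before sending. A tiny illustration of that rounding, with an arbitrary example instant:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/cloud/bigtable"
    )

    func main() {
        // bigtable.Time yields microseconds since the epoch.
        ts := bigtable.Time(time.Unix(0, 1428382701123456789)) // 1428382701123456 microseconds
        fmt.Println(int64(ts) % 1000) // 456: validTimestamp would reject this as-is
        ts -= ts % 1000               // what Mutation.Set does for ts != ServerTime
        fmt.Println(int64(ts) % 1000) // 0: accepted by the fake
    }
)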
+func applyMutations(tbl *table, r *row, muts []*btdpb.Mutation) error { + for _, mut := range muts { + switch mut := mut.Mutation.(type) { + default: + return fmt.Errorf("can't handle mutation type %T", mut) + case *btdpb.Mutation_SetCell_: + set := mut.SetCell + tbl.mu.RLock() + famOK := tbl.families[set.FamilyName] + tbl.mu.RUnlock() + if !famOK { + return fmt.Errorf("unknown family %q", set.FamilyName) + } + ts := set.TimestampMicros + if ts == -1 { // bigtable.ServerTime + ts = time.Now().UnixNano() / 1e3 + ts -= ts % 1000 // round to millisecond granularity + } + if !tbl.validTimestamp(ts) { + return fmt.Errorf("invalid timestamp %d", ts) + } + col := fmt.Sprintf("%s:%s", set.FamilyName, set.ColumnQualifier) + + cs := r.cells[col] + newCell := cell{ts: ts, value: set.Value} + replaced := false + for i, cell := range cs { + if cell.ts == newCell.ts { + cs[i] = newCell + replaced = true + break + } + } + if !replaced { + cs = append(cs, newCell) + } + sort.Sort(byDescTS(cs)) + r.cells[col] = cs + case *btdpb.Mutation_DeleteFromColumn_: + del := mut.DeleteFromColumn + col := fmt.Sprintf("%s:%s", del.FamilyName, del.ColumnQualifier) + + cs := r.cells[col] + if del.TimeRange != nil { + tsr := del.TimeRange + if !tbl.validTimestamp(tsr.StartTimestampMicros) { + return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros) + } + if !tbl.validTimestamp(tsr.EndTimestampMicros) { + return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros) + } + // Find half-open interval to remove. + // Cells are in descending timestamp order, + // so the predicates to sort.Search are inverted. + si, ei := 0, len(cs) + if tsr.StartTimestampMicros > 0 { + ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) + } + if tsr.EndTimestampMicros > 0 { + si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) + } + if si < ei { + copy(cs[si:], cs[ei:]) + cs = cs[:len(cs)-(ei-si)] + } + } else { + cs = nil + } + if len(cs) == 0 { + delete(r.cells, col) + } else { + r.cells[col] = cs + } + case *btdpb.Mutation_DeleteFromRow_: + r.cells = make(map[string][]cell) + } + } + return nil +} + +func (s *server) ReadModifyWriteRow(ctx context.Context, req *btspb.ReadModifyWriteRowRequest) (*btdpb.Row, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, fmt.Errorf("no such table %q", req.TableName) + } + + updates := make(map[string]cell) // copy of updated cells; keyed by full column name + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + // Assume all mutations apply to the most recent version of the cell. + // TODO(dsymonds): Verify this assumption and document it in the proto. + for _, rule := range req.Rules { + key := fmt.Sprintf("%s:%s", rule.FamilyName, rule.ColumnQualifier) + + newCell := false + if len(r.cells[key]) == 0 { + r.cells[key] = []cell{{ + // TODO(dsymonds): should this set a timestamp? + }} + newCell = true + } + cell := &r.cells[key][0] + + switch rule := rule.Rule.(type) { + default: + return nil, fmt.Errorf("unknown RMW rule oneof %T", rule) + case *btdpb.ReadModifyWriteRule_AppendValue: + cell.value = append(cell.value, rule.AppendValue...) 
+ case *btdpb.ReadModifyWriteRule_IncrementAmount: + var v int64 + if !newCell { + if len(cell.value) != 8 { + return nil, fmt.Errorf("increment on non-64-bit value") + } + v = int64(binary.BigEndian.Uint64(cell.value)) + } + v += rule.IncrementAmount + var val [8]byte + binary.BigEndian.PutUint64(val[:], uint64(v)) + cell.value = val[:] + } + updates[key] = *cell + } + + res := &btdpb.Row{ + Key: req.RowKey, + } + for col, cell := range updates { + i := strings.Index(col, ":") + fam, qual := col[:i], col[i+1:] + var f *btdpb.Family + for _, ff := range res.Families { + if ff.Name == fam { + f = ff + break + } + } + if f == nil { + f = &btdpb.Family{Name: fam} + res.Families = append(res.Families, f) + } + f.Columns = append(f.Columns, &btdpb.Column{ + Qualifier: []byte(qual), + Cells: []*btdpb.Cell{{ + Value: cell.value, + }}, + }) + } + return res, nil +} + +type table struct { + mu sync.RWMutex + families map[string]bool // keyed by plain family name + rows []*row // sorted by row key + rowIndex map[string]*row // indexed by row key +} + +func newTable() *table { + return &table{ + families: make(map[string]bool), + rowIndex: make(map[string]*row), + } +} + +func (t *table) validTimestamp(ts int64) bool { + // Assume millisecond granularity is required. + return ts%1000 == 0 +} + +func (t *table) mutableRow(row string) *row { + // Try fast path first. + t.mu.RLock() + r := t.rowIndex[row] + t.mu.RUnlock() + if r != nil { + return r + } + + // We probably need to create the row. + t.mu.Lock() + r = t.rowIndex[row] + if r == nil { + r = newRow(row) + t.rowIndex[row] = r + t.rows = append(t.rows, r) + sort.Sort(byRowKey(t.rows)) // yay, inefficient! + } + t.mu.Unlock() + return r +} + +type byRowKey []*row + +func (b byRowKey) Len() int { return len(b) } +func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key } + +type row struct { + key string + + mu sync.Mutex + cells map[string][]cell // keyed by full column name; cells are in descending timestamp order +} + +func newRow(key string) *row { + return &row{ + key: key, + cells: make(map[string][]cell), + } +} + +// copy returns a copy of the row. +// Cell values are aliased. +// r.mu should be held. +func (r *row) copy() *row { + nr := &row{ + key: r.key, + cells: make(map[string][]cell, len(r.cells)), + } + for col, cs := range r.cells { + // Copy the []cell slice, but not the []byte inside each cell. + nr.cells[col] = append([]cell(nil), cs...) + } + return nr +} + +type cell struct { + ts int64 + value []byte +} + +type byDescTS []cell + +func (b byDescTS) Len() int { return len(b) } +func (b byDescTS) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts } diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go new file mode 100644 index 000000000..504686b70 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go @@ -0,0 +1,580 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +// Command docs are in cbtdoc.go. + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "log" + "os" + "regexp" + "sort" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "golang.org/x/net/context" + "google.golang.org/cloud/bigtable" + "google.golang.org/cloud/bigtable/internal/cbtrc" +) + +var ( + oFlag = flag.String("o", "", "if set, redirect stdout to this file") + + config *cbtrc.Config + client *bigtable.Client + adminClient *bigtable.AdminClient + clusterAdminClient *bigtable.ClusterAdminClient +) + +func getClient() *bigtable.Client { + if client == nil { + var err error + client, err = bigtable.NewClient(context.Background(), config.Project, config.Zone, config.Cluster) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + } + return client +} + +func getAdminClient() *bigtable.AdminClient { + if adminClient == nil { + var err error + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Zone, config.Cluster) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + } + return adminClient +} + +func getClusterAdminClient() *bigtable.ClusterAdminClient { + if clusterAdminClient == nil { + var err error + clusterAdminClient, err = bigtable.NewClusterAdminClient(context.Background(), config.Project) + if err != nil { + log.Fatalf("Making bigtable.ClusterAdminClient: %v", err) + } + } + return clusterAdminClient +} + +func main() { + var err error + config, err = cbtrc.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Usage = usage + flag.Parse() + if err := config.CheckFlags(); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() == 0 { + usage() + os.Exit(1) + } + + if *oFlag != "" { + f, err := os.Create(*oFlag) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := f.Close(); err != nil { + log.Fatal(err) + } + }() + os.Stdout = f + } + + ctx := context.Background() + for _, cmd := range commands { + if cmd.Name == flag.Arg(0) { + cmd.do(ctx, flag.Args()[1:]...) + return + } + } + log.Fatalf("Unknown command %q", flag.Arg(0)) +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [flags] <command> ...\n", os.Args[0]) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, "\n%s", cmdSummary) +} + +var cmdSummary string // generated in init, below + +func init() { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0) + for _, cmd := range commands { + fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc) + } + tw.Flush() + buf.WriteString(configHelp) + cmdSummary = buf.String() +} + +var configHelp = ` +For convenience, values of the -project, -zone, -cluster and -creds flags +may be specified in ` + cbtrc.Filename() + ` in this format: + project = my-project-123 + zone = us-central1-b + cluster = my-cluster + creds = path-to-account-key.json +All values are optional, and all will be overridden by flags.
+` + +var commands = []struct { + Name, Desc string + do func(context.Context, ...string) + Usage string +}{ + { + Name: "count", + Desc: "Count rows in a table", + do: doCount, + Usage: "cbt count ", + }, + { + Name: "createfamily", + Desc: "Create a column family", + do: doCreateFamily, + Usage: "cbt createfamily
", + }, + { + Name: "createtable", + Desc: "Create a table", + do: doCreateTable, + Usage: "cbt createtable
", + }, + { + Name: "deletefamily", + Desc: "Delete a column family", + do: doDeleteFamily, + Usage: "cbt deletefamily
", + }, + { + Name: "deleterow", + Desc: "Delete a row", + do: doDeleteRow, + Usage: "cbt deleterow
", + }, + { + Name: "deletetable", + Desc: "Delete a table", + do: doDeleteTable, + Usage: "cbt deletetable
", + }, + { + Name: "doc", + Desc: "Print documentation for cbt", + do: doDoc, + Usage: "cbt doc", + }, + { + Name: "help", + Desc: "Print help text", + do: doHelp, + Usage: "cbt help [command]", + }, + { + Name: "listclusters", + Desc: "List clusters in a project", + do: doListClusters, + Usage: "cbt listclusters", + }, + { + Name: "lookup", + Desc: "Read from a single row", + do: doLookup, + Usage: "cbt lookup
", + }, + { + Name: "ls", + Desc: "List tables and column families", + do: doLS, + Usage: "cbt ls List tables\n" + + "cbt ls
+			"cbt ls <table>		List column families in <table>",
", + }, + { + Name: "read", + Desc: "Read rows", + do: doRead, + Usage: "cbt read
+			"  start=<row>		Start reading at this row\n" +
+			"  limit=<row>		Stop reading before this row\n" +
+			"  prefix=<prefix>	Read rows with this prefix\n",
+	},
+	{
+		Name: "set",
+		Desc: "Set value of a cell",
+		do:   doSet,
+		Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
+			"  family:column=val[@ts] may be repeated to set multiple cells.\n" +
+			"\n" +
+			"  ts is an optional integer timestamp.\n" +
+			"  If it cannot be parsed, the `@ts` part will be\n" +
+			"  interpreted as part of the value.",
+	},
+	/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API.
+	{
+		Name: "setclustersize",
+		Desc: "Set size of a cluster",
+		do:   doSetClusterSize,
+		Usage: "cbt setclustersize <num_nodes>",
+	},
+	*/
+}
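+
+// Illustrative invocations of the commands above (a sketch; "mytable",
+// "myfamily", "row1" and the values are hypothetical names, not part of
+// the tool):
+//
+//	cbt createtable mytable
+//	cbt createfamily mytable myfamily
+//	cbt set mytable row1 myfamily:col1=val1
+//	cbt read mytable prefix=row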
") + } + tbl := getClient().Open(args[0]) + + n := 0 + err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { + n++ + return true + }, bigtable.RowFilter(bigtable.StripValueFilter())) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } + fmt.Println(n) +} + +func doCreateFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt createfamily
") + } + err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Creating column family: %v", err) + } +} + +func doCreateTable(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatal("usage: cbt createtable
") + } + err := getAdminClient().CreateTable(ctx, args[0]) + if err != nil { + log.Fatalf("Creating table: %v", err) + } +} + +func doDeleteFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deletefamily
") + } + err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Deleting column family: %v", err) + } +} + +func doDeleteRow(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deleterow
") + } + tbl := getClient().Open(args[0]) + mut := bigtable.NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, args[1], mut); err != nil { + log.Fatalf("Deleting row: %v", err) + } +} + +func doDeleteTable(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatalf("Can't do `cbt deletetable %s`", args) + } + err := getAdminClient().DeleteTable(ctx, args[0]) + if err != nil { + log.Fatalf("Deleting table: %v", err) + } +} + +// to break circular dependencies +var ( + doDocFn func(ctx context.Context, args ...string) + doHelpFn func(ctx context.Context, args ...string) +) + +func init() { + doDocFn = doDocReal + doHelpFn = doHelpReal +} + +func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } +func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } + +func doDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + } + var buf bytes.Buffer + if err := docTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad doc template: %v", err) + } + out, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatalf("Bad doc output: %v", err) + } + os.Stdout.Write(out) +} + +var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ + "indent": func(s, ind string) string { + ss := strings.Split(s, "\n") + for i, p := range ss { + ss[i] = ind + p + } + return strings.Join(ss, "\n") + }, +}). + Parse(` +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. + +{{range .Commands}} +{{.Desc}} + +Usage: +{{indent .Usage "\t"}} + + + +{{end}} +*/ +package main +`)) + +func doHelpReal(ctx context.Context, args ...string) { + if len(args) == 0 { + fmt.Print(cmdSummary) + return + } + for _, cmd := range commands { + if cmd.Name == args[0] { + fmt.Println(cmd.Usage) + return + } + } + log.Fatalf("Don't know command %q", args[0]) +} + +func doListClusters(ctx context.Context, args ...string) { + if len(args) != 0 { + log.Fatalf("usage: cbt listclusters") + } + cis, err := getClusterAdminClient().Clusters(ctx) + if err != nil { + log.Fatalf("Getting list of clusters: %v", err) + } + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Cluster Name\tZone\tInfo\n") + fmt.Fprintf(tw, "------------\t----\t----\n") + for _, ci := range cis { + fmt.Fprintf(tw, "%s\t%s\t%s (%d serve nodes)\n", ci.Name, ci.Zone, ci.DisplayName, ci.ServeNodes) + } + tw.Flush() +} + +func doLookup(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatalf("usage: cbt lookup
") + } + table, row := args[0], args[1] + tbl := getClient().Open(table) + r, err := tbl.ReadRow(ctx, row) + if err != nil { + log.Fatalf("Reading row: %v", err) + } + printRow(r) +} + +func printRow(r bigtable.Row) { + fmt.Println(strings.Repeat("-", 40)) + fmt.Println(r.Key()) + + var fams []string + for fam := range r { + fams = append(fams, fam) + } + sort.Strings(fams) + for _, fam := range fams { + ris := r[fam] + sort.Sort(byColumn(ris)) + for _, ri := range ris { + ts := time.Unix(0, int64(ri.Timestamp)*1e3) + fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) + fmt.Printf(" %q\n", ri.Value) + } + } +} + +type byColumn []bigtable.ReadItem + +func (b byColumn) Len() int { return len(b) } +func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } + +func doLS(ctx context.Context, args ...string) { + switch len(args) { + default: + log.Fatalf("Can't do `cbt ls %s`", args) + case 0: + tables, err := getAdminClient().Tables(ctx) + if err != nil { + log.Fatalf("Getting list of tables: %v", err) + } + sort.Strings(tables) + for _, table := range tables { + fmt.Println(table) + } + case 1: + table := args[0] + ti, err := getAdminClient().TableInfo(ctx, table) + if err != nil { + log.Fatalf("Getting table info: %v", err) + } + sort.Strings(ti.Families) + for _, fam := range ti.Families { + fmt.Println(fam) + } + } +} + +func doRead(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatalf("usage: cbt read
[args ...]") + } + tbl := getClient().Open(args[0]) + + parsed := make(map[string]string) + for _, arg := range args[1:] { + i := strings.Index(arg, "=") + if i < 0 { + log.Fatalf("Bad arg %q", arg) + } + key, val := arg[:i], arg[i+1:] + switch key { + default: + log.Fatalf("Unknown arg key %q", key) + case "start", "limit", "prefix": + parsed[key] = val + } + } + if (parsed["start"] != "" || parsed["limit"] != "") && parsed["prefix"] != "" { + log.Fatal(`"start"/"limit" may not be mixed with "prefix"`) + } + + var rr bigtable.RowRange + if start, limit := parsed["start"], parsed["limit"]; limit != "" { + rr = bigtable.NewRange(start, limit) + } else if start != "" { + rr = bigtable.InfiniteRange(start) + } + if prefix := parsed["prefix"]; prefix != "" { + rr = bigtable.PrefixRange(prefix) + } + + // TODO(dsymonds): Support filters. + err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool { + printRow(r) + return true + }) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } +} + +var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`) + +func doSet(ctx context.Context, args ...string) { + if len(args) < 3 { + log.Fatalf("usage: cbt set
family:[column]=val[@ts] ...") + } + tbl := getClient().Open(args[0]) + row := args[1] + mut := bigtable.NewMutation() + for _, arg := range args[2:] { + m := setArg.FindStringSubmatch(arg) + if m == nil { + log.Fatalf("Bad set arg %q", arg) + } + val := m[3] + ts := bigtable.Now() + if i := strings.LastIndex(val, "@"); i >= 0 { + // Try parsing a timestamp. + n, err := strconv.ParseInt(val[i+1:], 0, 64) + if err == nil { + val = val[:i] + ts = bigtable.Timestamp(n) + } + } + mut.Set(m[1], m[2], ts, []byte(val)) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + log.Fatalf("Applying mutation: %v", err) + } +} + +/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API. +func doSetClusterSize(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatalf("usage: cbt setclustersize ") + } + n, err := strconv.ParseInt(args[0], 0, 32) + if err != nil { + log.Fatalf("Bad num_nodes value %q: %v", args[0], err) + } + if err := getAdminClient().SetClusterSize(ctx, int(n)); err != nil { + log.Fatalf("Setting cluster size: %v", err) + } +} +*/ diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go new file mode 100644 index 000000000..0e0036722 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go @@ -0,0 +1,146 @@ +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: + + count Count rows in a table + createfamily Create a column family + createtable Create a table + deletefamily Delete a column family + deleterow Delete a row + deletetable Delete a table + doc Print documentation for cbt + help Print help text + listclusters List clusters in a project + lookup Read from a single row + ls List tables and column families + read Read rows + set Set value of a cell + +Use "cbt help " for more information about a command. + + +Count rows in a table + +Usage: + cbt count
+
+
+
+
+Create a column family
+
+Usage:
+	cbt createfamily <table> <family>
+
+
+
+
+Create a table
+
+Usage:
+	cbt createtable <table>
+
+
+
+
+Delete a column family
+
+Usage:
+	cbt deletefamily <table> <family>
+
+
+
+
+Delete a row
+
+Usage:
+	cbt deleterow <table> <row>
+
+
+
+
+Delete a table
+
+Usage:
+	cbt deletetable <table>
+
+
+
+
+Print documentation for cbt
+
+Usage:
+	cbt doc
+
+
+
+
+Print help text
+
+Usage:
+	cbt help [command]
+
+
+
+
+List clusters in a project
+
+Usage:
+	cbt listclusters
+
+
+
+
+Read from a single row
+
+Usage:
+	cbt lookup <table> <row>
+
+
+
+
+List tables and column families
+
+Usage:
+	cbt ls			List tables
+	cbt ls <table>		List column families in <table>
+
+
+
+
+Read rows
+
+Usage:
+	cbt read <table> [start=<row>] [limit=<row>] [prefix=<prefix>]
+	  start=<row>		Start reading at this row
+	  limit=<row>		Stop reading before this row
+	  prefix=<prefix>	Read rows with this prefix
+
+
+
+
+
+Set value of a cell
+
+Usage:
+	cbt set <table> <row>
family:column=val[@ts] ... + family:column=val[@ts] may be repeated to set multiple cells. + + ts is an optional integer timestamp. + If it cannot be parsed, the `@ts` part will be + interpreted as part of the value. + + + + +*/ +package main diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go new file mode 100644 index 000000000..ff5c5a151 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go @@ -0,0 +1,159 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Loadtest does some load testing through the Go client library for Cloud Bigtable. +*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "google.golang.org/cloud/bigtable" + "google.golang.org/cloud/bigtable/internal/cbtrc" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") + scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist") + + config *cbtrc.Config + client *bigtable.Client + adminClient *bigtable.AdminClient +) + +func main() { + var err error + config, err = cbtrc.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 0 { + flag.Usage() + os.Exit(1) + } + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Zone, config.Cluster) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Zone, config.Cluster) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + defer adminClient.Close() + + // Create a scratch table. + log.Printf("Setting up scratch table...") + if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil { + log.Fatalf("Making scratch table %q: %v", *scratchTable, err) + } + if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil { + log.Fatalf("Making scratch table column family: %v", err) + } + // Upon a successful run, delete the table. Don't bother checking for errors. + defer adminClient.DeleteTable(context.Background(), *scratchTable) + + log.Printf("Starting load test... 
(run for %v)", *runFor) + tbl := client.Open(*scratchTable) + sem := make(chan int, 100) // limit the number of requests happening at once + var reads, writes stats + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + var stats *stats + defer func() { + stats.Record(ok, time.Since(opStart)) + }() + + row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows + + switch rand.Intn(10) { + default: + // read + stats = &reads + _, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1))) + if err != nil { + log.Printf("Error doing read: %v", err) + ok = false + } + case 0, 1, 2, 3, 4: + // write + stats = &writes + mut := bigtable.NewMutation() + mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write + if err := tbl.Apply(context.Background(), row, mut); err != nil { + log.Printf("Error doing mutation: %v", err) + ok = false + } + } + }() + } + wg.Wait() + + log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, newAggregate(reads.ds)) + log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, newAggregate(writes.ds)) +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go new file mode 100644 index 000000000..57c12da1e --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go @@ -0,0 +1,97 @@ +package main + +import ( + "bytes" + "fmt" + "math" + "sort" + "text/tabwriter" + "time" +) + +type byDuration []time.Duration + +func (data byDuration) Len() int { return len(data) } +func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] } +func (data byDuration) Less(i, j int) bool { return data[i] < data[j] } + +// quantile returns a value representing the kth of q quantiles. +// May alter the order of data. +func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) { + if len(data) < 1 { + return 0, false + } + if k > q { + return 0, false + } + if k < 0 || q < 1 { + return 0, false + } + + sort.Sort(byDuration(data)) + + if k == 0 { + return data[0], true + } + if k == q { + return data[len(data)-1], true + } + + bucketSize := float64(len(data)-1) / float64(q) + i := float64(k) * bucketSize + + lower := int(math.Trunc(i)) + var upper int + if i > float64(lower) && lower+1 < len(data) { + // If the quantile lies between two elements + upper = lower + 1 + } else { + upper = lower + } + weightUpper := i - float64(lower) + weightLower := 1 - weightUpper + return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true +} + +type aggregate struct { + min, median, max time.Duration + p95, p99 time.Duration // percentiles +} + +// newAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data. 
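+// The percentiles come from quantile above, which sorts the samples and
+// linearly interpolates between the two nearest elements when a cut point
+// falls between them.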
+func newAggregate(latencies []time.Duration) *aggregate { + var agg aggregate + + if len(latencies) == 0 { + return nil + } + var ok bool + if agg.min, ok = quantile(latencies, 0, 2); !ok { + return nil + } + if agg.median, ok = quantile(latencies, 1, 2); !ok { + return nil + } + if agg.max, ok = quantile(latencies, 2, 2); !ok { + return nil + } + if agg.p95, ok = quantile(latencies, 95, 100); !ok { + return nil + } + if agg.p99, ok = quantile(latencies, 99, 100); !ok { + return nil + } + return &agg +} + +func (agg *aggregate) String() string { + if agg == nil { + return "no data" + } + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding + fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n", + agg.min, agg.median, agg.max, agg.p95, agg.p99) + tw.Flush() + return buf.String() +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go new file mode 100644 index 000000000..e3b14914a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go @@ -0,0 +1,108 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bigtable is an API to Google Cloud Bigtable. + +See https://cloud.google.com/bigtable/docs/ for general product documentation. + +Setup and Credentials + +Use NewClient or NewAdminClient to create a client that can be used to access +the data or admin APIs respectively. Both require credentials that have permission +to access the Cloud Bigtable API. + +If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials +(https://developers.google.com/accounts/docs/application-default-credentials) +is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. + +To use alternate credentials, pass them to NewClient or NewAdminClient using cloud.WithTokenSource. +For instance, you can use service account credentials by visiting +https://cloud.google.com/console/project/MYPROJECT/apiui/credential, +creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing + jsonKey, err := ioutil.ReadFile(pathToKeyFile) + ... + config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. + ... + client, err := bigtable.NewClient(ctx, project, zone, cluster, cloud.WithTokenSource(config.TokenSource(ctx))) + ... +Here, `google` means the golang.org/x/oauth2/google package +and `cloud` means the google.golang.org/cloud package. + +Reading + +The principal way to read from a Bigtable is to use the ReadRows method on *Table. +A RowRange specifies a contiguous portion of a table. A Filter may be provided through +RowFilter to limit or transform the data that is returned. + tbl := client.Open("mytable") + ... + // Read all the rows starting with "com.google.", + // but only fetch the columns in the "links" family. 
+ rr := bigtable.PrefixRange("com.google.") + err := tbl.ReadRows(ctx, rr, func(r Row) bool { + // do something with r + return true // keep going + }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) + ... + +To read a single row, use the ReadRow helper method. + r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key + ... + +Writing + +This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. +The former expresses idempotent operations. +The latter expresses non-idempotent operations and returns the new values of updated cells. +These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), +building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite +methods on a Table. + +For instance, to set a couple of cells in a table, + tbl := client.Open("mytable") + mut := bigtable.NewMutation() + mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) + mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) + err := tbl.Apply(ctx, "com.google.cloud", mut) + ... + +To increment an encoded value in one cell, + tbl := client.Open("mytable") + rmw := bigtable.NewReadModifyWrite() + rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" + r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) + ... +*/ +package bigtable + +// Scope constants for authentication credentials. +// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile. +const ( + // Scope is the OAuth scope for Cloud Bigtable data operations. + Scope = "https://www.googleapis.com/auth/bigtable.data" + // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. + ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" + + // AdminScope is the OAuth scope for Cloud Bigtable table admin operations. + AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table" + + // ClusterAdminScope is the OAuth scope for Cloud Bigtable cluster admin operations. + ClusterAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster" +) + +// clientUserAgent identifies the version of this package. +// It should be bumped upon significant changes only. +const clientUserAgent = "cbt-go/20150727" diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go new file mode 100644 index 000000000..dbe247b1b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go @@ -0,0 +1,156 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + + btdpb "google.golang.org/cloud/bigtable/internal/data_proto" +) + +// A Filter represents a row filter. 
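+// Filters are attached to a read via the RowFilter option; for example (a
+// sketch using helpers defined below):
+//
+//	f := ChainFilters(FamilyFilter("links"), LatestNFilter(1))
+//	err := tbl.ReadRows(ctx, rr, fn, RowFilter(f))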
+type Filter interface { + String() string + proto() *btdpb.RowFilter +} + +// ChainFilters returns a filter that applies a sequence of filters. +func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } + +type chainFilter struct { + sub []Filter +} + +func (cf chainFilter) String() string { + var ss []string + for _, sf := range cf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " | ") + ")" +} + +func (cf chainFilter) proto() *btdpb.RowFilter { + chain := &btdpb.RowFilter_Chain{} + for _, sf := range cf.sub { + chain.Filters = append(chain.Filters, sf.proto()) + } + return &btdpb.RowFilter{ + Filter: &btdpb.RowFilter_Chain_{chain}, + } +} + +// InterleaveFilters returns a filter that applies a set of filters in parallel +// and interleaves the results. +func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } + +type interleaveFilter struct { + sub []Filter +} + +func (ilf interleaveFilter) String() string { + var ss []string + for _, sf := range ilf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " + ") + ")" +} + +func (ilf interleaveFilter) proto() *btdpb.RowFilter { + inter := &btdpb.RowFilter_Interleave{} + for _, sf := range ilf.sub { + inter.Filters = append(inter.Filters, sf.proto()) + } + return &btdpb.RowFilter{ + Filter: &btdpb.RowFilter_Interleave_{inter}, + } +} + +// RowKeyFilter returns a filter that matches cells from rows whose +// key matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } + +type rowKeyFilter string + +func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } + +func (rkf rowKeyFilter) proto() *btdpb.RowFilter { + return &btdpb.RowFilter{Filter: &btdpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}} +} + +// FamilyFilter returns a filter that matches cells whose family name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } + +type familyFilter string + +func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } + +func (ff familyFilter) proto() *btdpb.RowFilter { + return &btdpb.RowFilter{Filter: &btdpb.RowFilter_FamilyNameRegexFilter{string(ff)}} +} + +// ColumnFilter returns a filter that matches cells whose column name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } + +type columnFilter string + +func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } + +func (cf columnFilter) proto() *btdpb.RowFilter { + return &btdpb.RowFilter{Filter: &btdpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}} +} + +// ValueFilter returns a filter that matches cells whose value +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ValueFilter(pattern string) Filter { return valueFilter(pattern) } + +type valueFilter string + +func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } + +func (vf valueFilter) proto() *btdpb.RowFilter { + return &btdpb.RowFilter{Filter: &btdpb.RowFilter_ValueRegexFilter{[]byte(vf)}} +} + +// LatestNFilter returns a filter that matches the most recent N cells in each column. 
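+// For example, the loadtest command in this patch reads with
+// RowFilter(LatestNFilter(1)) to fetch only the newest cell in each column.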
+func LatestNFilter(n int) Filter { return latestNFilter(n) } + +type latestNFilter int32 + +func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } + +func (lnf latestNFilter) proto() *btdpb.RowFilter { + return &btdpb.RowFilter{Filter: &btdpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}} +} + +// StripValueFilter returns a filter that replaces each value with the empty string. +func StripValueFilter() Filter { return stripValueFilter{} } + +type stripValueFilter struct{} + +func (stripValueFilter) String() string { return "strip_value()" } +func (stripValueFilter) proto() *btdpb.RowFilter { + return &btdpb.RowFilter{Filter: &btdpb.RowFilter_StripValueTransformer{true}} +} + +// TODO(dsymonds): More filters: cond, col/ts/value range, sampling diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go new file mode 100644 index 000000000..84499fcd2 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + durpb "google.golang.org/cloud/bigtable/internal/duration_proto" + bttdpb "google.golang.org/cloud/bigtable/internal/table_data_proto" +) + +// A GCPolicy represents a rule that determines which cells are eligible for garbage collection. +type GCPolicy interface { + String() string + proto() *bttdpb.GcRule +} + +// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. +func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } + +type intersectionPolicy struct { + sub []GCPolicy +} + +func (ip intersectionPolicy) String() string { + var ss []string + for _, sp := range ip.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " && ") + ")" +} + +func (ip intersectionPolicy) proto() *bttdpb.GcRule { + inter := &bttdpb.GcRule_Intersection{} + for _, sp := range ip.sub { + inter.Rules = append(inter.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Intersection_{inter}, + } +} + +// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. +func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } + +type unionPolicy struct { + sub []GCPolicy +} + +func (up unionPolicy) String() string { + var ss []string + for _, sp := range up.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " || ") + ")" +} + +func (up unionPolicy) proto() *bttdpb.GcRule { + union := &bttdpb.GcRule_Union{} + for _, sp := range up.sub { + union.Rules = append(union.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Union_{union}, + } +} + +// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell +// except for the most recent n. 
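+// For example (a sketch), UnionPolicy(MaxVersionsPolicy(2), MaxAgePolicy(24*time.Hour))
+// marks a cell version for collection once it is older than the two newest
+// versions or more than a day old.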
+func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } + +type maxVersionsPolicy int + +func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } + +func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { + return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}} +} + +// MaxAgePolicy returns a GC policy that applies to all cells +// older than the given age. +func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } + +type maxAgePolicy time.Duration + +var units = []struct { + d time.Duration + suffix string +}{ + {24 * time.Hour, "d"}, + {time.Hour, "h"}, + {time.Minute, "m"}, +} + +func (ma maxAgePolicy) String() string { + d := time.Duration(ma) + for _, u := range units { + if d%u.d == 0 { + return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) + } + } + return fmt.Sprintf("age() > %d", d/time.Microsecond) +} + +func (ma maxAgePolicy) proto() *bttdpb.GcRule { + // This doesn't handle overflows, etc. + // Fix this if people care about GC policies over 290 years. + ns := time.Duration(ma).Nanoseconds() + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{ + Seconds: ns / 1e9, + Nanos: int32(ns % 1e9), + }}, + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go new file mode 100644 index 000000000..fec4ab3fd --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go @@ -0,0 +1,105 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cbtrc encapsulates common code for reading .cbtrc files. +package cbtrc + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +// Config represents a configuration. +type Config struct { + Project, Zone, Cluster string // required + Creds string // optional +} + +// RegisterFlags registers a set of standard flags for this config. +// It should be called before flag.Parse. +func (c *Config) RegisterFlags() { + flag.StringVar(&c.Project, "project", c.Project, "project ID") + flag.StringVar(&c.Zone, "zone", c.Zone, "CBT zone") + flag.StringVar(&c.Cluster, "cluster", c.Cluster, "CBT cluster") + flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") +} + +// CheckFlags checks that the required config values are set. +func (c *Config) CheckFlags() error { + var missing []string + if c.Project == "" { + missing = append(missing, "-project") + } + if c.Zone == "" { + missing = append(missing, "-zone") + } + if c.Cluster == "" { + missing = append(missing, "-cluster") + } + if len(missing) > 0 { + return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) + } + return nil +} + +// Filename returns the filename consulted for standard configuration. +func Filename() string { + // TODO(dsymonds): Might need tweaking for Windows. 
+ return filepath.Join(os.Getenv("HOME"), ".cbtrc") +} + +// Load loads a .cbtrc file. +// If the file is not present, an empty config is returned. +func Load() (*Config, error) { + filename := Filename() + data, err := ioutil.ReadFile(filename) + if err != nil { + // silent fail if the file isn't there + if os.IsNotExist(err) { + return &Config{}, nil + } + return nil, fmt.Errorf("Reading %s: %v", filename, err) + } + c := new(Config) + s := bufio.NewScanner(bytes.NewReader(data)) + for s.Scan() { + line := s.Text() + i := strings.Index(line, "=") + if i < 0 { + return nil, fmt.Errorf("Bad line in %s: %q", filename, line) + } + key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) + switch key { + default: + return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) + case "project": + c.Project = val + case "zone": + c.Zone = val + case "cluster": + c.Cluster = val + case "creds": + c.Creds = val + } + } + return c, s.Err() +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go new file mode 100644 index 000000000..4d5a27c76 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto +// DO NOT EDIT! + +/* +Package google_bigtable_admin_cluster_v1 is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto + +It has these top-level messages: + Zone + Cluster +*/ +package google_bigtable_admin_cluster_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type StorageType int32 + +const ( + // The storage type used is unspecified. + StorageType_STORAGE_UNSPECIFIED StorageType = 0 + // Data will be stored in SSD, providing low and consistent latencies. + StorageType_STORAGE_SSD StorageType = 1 +) + +var StorageType_name = map[int32]string{ + 0: "STORAGE_UNSPECIFIED", + 1: "STORAGE_SSD", +} +var StorageType_value = map[string]int32{ + "STORAGE_UNSPECIFIED": 0, + "STORAGE_SSD": 1, +} + +func (x StorageType) String() string { + return proto.EnumName(StorageType_name, int32(x)) +} + +// Possible states of a zone. +type Zone_Status int32 + +const ( + // The state of the zone is unknown or unspecified. + Zone_UNKNOWN Zone_Status = 0 + // The zone is in a good state. + Zone_OK Zone_Status = 1 + // The zone is down for planned maintenance. + Zone_PLANNED_MAINTENANCE Zone_Status = 2 + // The zone is down for emergency or unplanned maintenance. + Zone_EMERGENCY_MAINENANCE Zone_Status = 3 +) + +var Zone_Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "OK", + 2: "PLANNED_MAINTENANCE", + 3: "EMERGENCY_MAINENANCE", +} +var Zone_Status_value = map[string]int32{ + "UNKNOWN": 0, + "OK": 1, + "PLANNED_MAINTENANCE": 2, + "EMERGENCY_MAINENANCE": 3, +} + +func (x Zone_Status) String() string { + return proto.EnumName(Zone_Status_name, int32(x)) +} + +// A physical location in which a particular project can allocate Cloud BigTable +// resources. 
+type Zone struct { + // A permanent unique identifier for the zone. + // Values are of the form projects//zones/[a-z][-a-z0-9]* + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of this zone as it appears in UIs. + DisplayName string `protobuf:"bytes,2,opt,name=display_name" json:"display_name,omitempty"` + // The current state of this zone. + Status Zone_Status `protobuf:"varint,3,opt,name=status,enum=google.bigtable.admin.cluster.v1.Zone_Status" json:"status,omitempty"` +} + +func (m *Zone) Reset() { *m = Zone{} } +func (m *Zone) String() string { return proto.CompactTextString(m) } +func (*Zone) ProtoMessage() {} + +// An isolated set of Cloud BigTable resources on which tables can be hosted. +type Cluster struct { + // A permanent unique identifier for the cluster. For technical reasons, the + // zone in which the cluster resides is included here. + // Values are of the form + // projects//zones//clusters/[a-z][-a-z0-9]* + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The descriptive name for this cluster as it appears in UIs. + // Must be unique per zone. + DisplayName string `protobuf:"bytes,4,opt,name=display_name" json:"display_name,omitempty"` + // The number of serve nodes allocated to this cluster. + ServeNodes int32 `protobuf:"varint,5,opt,name=serve_nodes" json:"serve_nodes,omitempty"` + // What storage type to use for tables in this cluster. Only configurable at + // cluster creation time. If unspecified, STORAGE_SSD will be used. + DefaultStorageType StorageType `protobuf:"varint,8,opt,name=default_storage_type,enum=google.bigtable.admin.cluster.v1.StorageType" json:"default_storage_type,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} + +func init() { + proto.RegisterEnum("google.bigtable.admin.cluster.v1.StorageType", StorageType_name, StorageType_value) + proto.RegisterEnum("google.bigtable.admin.cluster.v1.Zone_Status", Zone_Status_name, Zone_Status_value) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto new file mode 100644 index 000000000..af39559b3 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto @@ -0,0 +1,89 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.cluster.v1; + + +option java_multiple_files = true; +option java_outer_classname = "BigtableClusterDataProto"; +option java_package = "com.google.bigtable.admin.cluster.v1"; + + +// A physical location in which a particular project can allocate Cloud BigTable +// resources. +message Zone { + // Possible states of a zone. 
+ enum Status { + // The state of the zone is unknown or unspecified. + UNKNOWN = 0; + + // The zone is in a good state. + OK = 1; + + // The zone is down for planned maintenance. + PLANNED_MAINTENANCE = 2; + + // The zone is down for emergency or unplanned maintenance. + EMERGENCY_MAINENANCE = 3; + } + + // A permanent unique identifier for the zone. + // Values are of the form projects//zones/[a-z][-a-z0-9]* + string name = 1; + + // The name of this zone as it appears in UIs. + string display_name = 2; + + // The current state of this zone. + Status status = 3; +} + +// An isolated set of Cloud BigTable resources on which tables can be hosted. +message Cluster { + // A permanent unique identifier for the cluster. For technical reasons, the + // zone in which the cluster resides is included here. + // Values are of the form + // projects//zones//clusters/[a-z][-a-z0-9]* + string name = 1; + + // If this cluster has been deleted, the time at which its backup will + // be irrevocably destroyed. Omitted otherwise. + // This cannot be set directly, only through DeleteCluster. + + // The operation currently running on the cluster, if any. + // This cannot be set directly, only through CreateCluster, UpdateCluster, + // or UndeleteCluster. Calls to these methods will be rejected if + // "current_operation" is already set. + + // The descriptive name for this cluster as it appears in UIs. + // Must be unique per zone. + string display_name = 4; + + // The number of serve nodes allocated to this cluster. + int32 serve_nodes = 5; + + // What storage type to use for tables in this cluster. Only configurable at + // cluster creation time. If unspecified, STORAGE_SSD will be used. + StorageType default_storage_type = 8; +} + +enum StorageType { + // The storage type used is unspecified. + STORAGE_UNSPECIFIED = 0; + + // Data will be stored in SSD, providing low and consistent latencies. + STORAGE_SSD = 1; +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go new file mode 100644 index 000000000..32f84365a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go @@ -0,0 +1,331 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto +// DO NOT EDIT! + +package google_bigtable_admin_cluster_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_bigtable_admin_cluster_v11 "google.golang.org/cloud/bigtable/internal/cluster_data_proto" +import google_protobuf "google.golang.org/cloud/bigtable/internal/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Client API for BigtableClusterService service + +type BigtableClusterServiceClient interface { + // Lists the supported zones for the given project. + ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) + // Gets information about a particular cluster. 
+ GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) + // Lists all clusters in the given project, along with any zones for which + // cluster information could not be retrieved. + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Creates a cluster and begins preparing it to begin serving. The returned + // cluster embeds as its "current_operation" a long-running operation which + // can be used to track the progress of turning up the new cluster. + // Immediately upon completion of this request: + // * The cluster will be readable via the API, with all requested attributes + // but no allocated resources. + // Until completion of the embedded operation: + // * Cancelling the operation will render the cluster immediately unreadable + // via the API. + // * All other attempts to modify or delete the cluster will be rejected. + // Upon completion of the embedded operation: + // * Billing for all successfully-allocated resources will begin (some types + // may have lower than the requested levels). + // * New tables can be created in the cluster. + // * The cluster's allocated resource levels will be readable via the API. + // The embedded operation's "metadata" field type is + // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) + // Updates a cluster, and begins allocating or releasing resources as + // requested. The returned cluster embeds as its "current_operation" a + // long-running operation which can be used to track the progress of updating + // the cluster. + // Immediately upon completion of this request: + // * For resource types where a decrease in the cluster's allocation has been + // requested, billing will be based on the newly-requested level. + // Until completion of the embedded operation: + // * Cancelling the operation will set its metadata's "cancelled_at_time", + // and begin restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, after which + // point it will terminate with a CANCELLED status. + // * All other attempts to modify or delete the cluster will be rejected. + // * Reading the cluster via the API will continue to give the pre-request + // resource levels. + // Upon completion of the embedded operation: + // * Billing will begin for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources will be available for serving the cluster's + // tables. + // * The cluster's new resource levels will be readable via the API. + // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + UpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) + // Marks a cluster and all of its tables for permanent deletion in 7 days. + // Immediately upon completion of the request: + // * Billing will cease for all of the cluster's reserved resources. 
+ // * The cluster's "delete_time" field will be set 7 days in the future. + // Soon afterward: + // * All tables within the cluster will become unavailable. + // Prior to the cluster's "delete_time": + // * The cluster can be recovered with a call to UndeleteCluster. + // * All other attempts to modify or delete the cluster will be rejected. + // At the cluster's "delete_time": + // * The cluster and *all of its tables* will immediately and irrevocably + // disappear from the API, and their data will be permanently deleted. + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) +} + +type bigtableClusterServiceClient struct { + cc *grpc.ClientConn +} + +func NewBigtableClusterServiceClient(cc *grpc.ClientConn) BigtableClusterServiceClient { + return &bigtableClusterServiceClient{cc} +} + +func (c *bigtableClusterServiceClient) ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) { + out := new(ListZonesResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { + out := new(google_bigtable_admin_cluster_v11.Cluster) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { + out := new(google_bigtable_admin_cluster_v11.Cluster) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) UpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { + out := new(google_bigtable_admin_cluster_v11.Cluster) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableClusterService service + +type BigtableClusterServiceServer interface { + // Lists the supported zones for the given project. + ListZones(context.Context, *ListZonesRequest) (*ListZonesResponse, error) + // Gets information about a particular cluster. 
+ GetCluster(context.Context, *GetClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error) + // Lists all clusters in the given project, along with any zones for which + // cluster information could not be retrieved. + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Creates a cluster and begins preparing it to begin serving. The returned + // cluster embeds as its "current_operation" a long-running operation which + // can be used to track the progress of turning up the new cluster. + // Immediately upon completion of this request: + // * The cluster will be readable via the API, with all requested attributes + // but no allocated resources. + // Until completion of the embedded operation: + // * Cancelling the operation will render the cluster immediately unreadable + // via the API. + // * All other attempts to modify or delete the cluster will be rejected. + // Upon completion of the embedded operation: + // * Billing for all successfully-allocated resources will begin (some types + // may have lower than the requested levels). + // * New tables can be created in the cluster. + // * The cluster's allocated resource levels will be readable via the API. + // The embedded operation's "metadata" field type is + // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + CreateCluster(context.Context, *CreateClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error) + // Updates a cluster, and begins allocating or releasing resources as + // requested. The returned cluster embeds as its "current_operation" a + // long-running operation which can be used to track the progress of updating + // the cluster. + // Immediately upon completion of this request: + // * For resource types where a decrease in the cluster's allocation has been + // requested, billing will be based on the newly-requested level. + // Until completion of the embedded operation: + // * Cancelling the operation will set its metadata's "cancelled_at_time", + // and begin restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, after which + // point it will terminate with a CANCELLED status. + // * All other attempts to modify or delete the cluster will be rejected. + // * Reading the cluster via the API will continue to give the pre-request + // resource levels. + // Upon completion of the embedded operation: + // * Billing will begin for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources will be available for serving the cluster's + // tables. + // * The cluster's new resource levels will be readable via the API. + // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + UpdateCluster(context.Context, *google_bigtable_admin_cluster_v11.Cluster) (*google_bigtable_admin_cluster_v11.Cluster, error) + // Marks a cluster and all of its tables for permanent deletion in 7 days. + // Immediately upon completion of the request: + // * Billing will cease for all of the cluster's reserved resources. + // * The cluster's "delete_time" field will be set 7 days in the future. 
+ // Soon afterward: + // * All tables within the cluster will become unavailable. + // Prior to the cluster's "delete_time": + // * The cluster can be recovered with a call to UndeleteCluster. + // * All other attempts to modify or delete the cluster will be rejected. + // At the cluster's "delete_time": + // * The cluster and *all of its tables* will immediately and irrevocably + // disappear from the API, and their data will be permanently deleted. + DeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf.Empty, error) +} + +func RegisterBigtableClusterServiceServer(s *grpc.Server, srv BigtableClusterServiceServer) { + s.RegisterService(&_BigtableClusterService_serviceDesc, srv) +} + +func _BigtableClusterService_ListZones_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(ListZonesRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableClusterServiceServer).ListZones(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableClusterService_GetCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(GetClusterRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableClusterServiceServer).GetCluster(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableClusterService_ListClusters_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(ListClustersRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableClusterServiceServer).ListClusters(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableClusterService_CreateCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(CreateClusterRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableClusterServiceServer).CreateCluster(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableClusterService_UpdateCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(google_bigtable_admin_cluster_v11.Cluster) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableClusterServiceServer).UpdateCluster(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableClusterService_DeleteCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableClusterServiceServer).DeleteCluster(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _BigtableClusterService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.admin.cluster.v1.BigtableClusterService", + HandlerType: (*BigtableClusterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListZones", + Handler: _BigtableClusterService_ListZones_Handler, + }, + { + MethodName: "GetCluster", + Handler: _BigtableClusterService_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _BigtableClusterService_ListClusters_Handler, + }, + { + MethodName: "CreateCluster", + 
Handler: _BigtableClusterService_CreateCluster_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _BigtableClusterService_UpdateCluster_Handler, + }, + { + MethodName: "DeleteCluster", + Handler: _BigtableClusterService_DeleteCluster_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto new file mode 100644 index 000000000..6243dcfb5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto @@ -0,0 +1,118 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.cluster.v1; + +import "google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto"; +import "google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto"; +import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableClusterServicesProto"; +option java_package = "com.google.bigtable.admin.cluster.v1"; + + +// Service for managing zonal Cloud Bigtable resources. +service BigtableClusterService { + // Lists the supported zones for the given project. + rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { + } + + // Gets information about a particular cluster. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + } + + // Lists all clusters in the given project, along with any zones for which + // cluster information could not be retrieved. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + } + + // Creates a cluster and begins preparing it to begin serving. The returned + // cluster embeds as its "current_operation" a long-running operation which + // can be used to track the progress of turning up the new cluster. + // Immediately upon completion of this request: + // * The cluster will be readable via the API, with all requested attributes + // but no allocated resources. + // Until completion of the embedded operation: + // * Cancelling the operation will render the cluster immediately unreadable + // via the API. + // * All other attempts to modify or delete the cluster will be rejected. + // Upon completion of the embedded operation: + // * Billing for all successfully-allocated resources will begin (some types + // may have lower than the requested levels). + // * New tables can be created in the cluster. + // * The cluster's allocated resource levels will be readable via the API. 
+ // The embedded operation's "metadata" field type is
+ // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is
+ // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
+ rpc CreateCluster(CreateClusterRequest) returns (Cluster) {
+ }
+
+ // Updates a cluster, and begins allocating or releasing resources as
+ // requested. The returned cluster embeds as its "current_operation" a
+ // long-running operation which can be used to track the progress of updating
+ // the cluster.
+ // Immediately upon completion of this request:
+ // * For resource types where a decrease in the cluster's allocation has been
+ // requested, billing will be based on the newly-requested level.
+ // Until completion of the embedded operation:
+ // * Cancelling the operation will set its metadata's "cancelled_at_time",
+ // and begin restoring resources to their pre-request values. The operation
+ // is guaranteed to succeed at undoing all resource changes, after which
+ // point it will terminate with a CANCELLED status.
+ // * All other attempts to modify or delete the cluster will be rejected.
+ // * Reading the cluster via the API will continue to give the pre-request
+ // resource levels.
+ // Upon completion of the embedded operation:
+ // * Billing will begin for all successfully-allocated resources (some types
+ // may have lower than the requested levels).
+ // * All newly-reserved resources will be available for serving the cluster's
+ // tables.
+ // * The cluster's new resource levels will be readable via the API.
+ // The embedded operation's "metadata" field type is
+ // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is
+ // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
+ rpc UpdateCluster(Cluster) returns (Cluster) {
+ }
+
+ // Marks a cluster and all of its tables for permanent deletion in 7 days.
+ // Immediately upon completion of the request:
+ // * Billing will cease for all of the cluster's reserved resources.
+ // * The cluster's "delete_time" field will be set 7 days in the future.
+ // Soon afterward:
+ // * All tables within the cluster will become unavailable.
+ // Prior to the cluster's "delete_time":
+ // * The cluster can be recovered with a call to UndeleteCluster.
+ // * All other attempts to modify or delete the cluster will be rejected.
+ // At the cluster's "delete_time":
+ // * The cluster and *all of its tables* will immediately and irrevocably
+ // disappear from the API, and their data will be permanently deleted.
+ rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
+ }
+
+ // Cancels the scheduled deletion of a cluster and begins preparing it to
+ // resume serving. The returned operation will also be embedded as the
+ // cluster's "current_operation".
+ // Immediately upon completion of this request:
+ // * The cluster's "delete_time" field will be unset, protecting it from
+ // automatic deletion.
+ // Until completion of the returned operation:
+ // * The operation cannot be cancelled.
+ // Upon completion of the returned operation:
+ // * Billing for the cluster's resources will resume.
+ // * All tables within the cluster will be available.
+ // The embedded operation's "metadata" field type is
+ // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is
+ // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
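+
+ // Illustrative sketch, not part of the upstream proto: one way the
+ // generated Go client for this service might be driven, assuming an
+ // already-dialed *grpc.ClientConn named "conn", a context.Context named
+ // "ctx", and the hypothetical project name "my-project":
+ //
+ //   client := NewBigtableClusterServiceClient(conn)
+ //   resp, err := client.ListZones(ctx, &ListZonesRequest{Name: "projects/my-project"})
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   for _, zone := range resp.Zones {
+ //       fmt.Println(zone)
+ //   }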
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go new file mode 100644 index 000000000..c60c14a84 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go @@ -0,0 +1,205 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto
+// DO NOT EDIT!
+
+/*
+Package google_bigtable_admin_cluster_v1 is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto
+ google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto
+
+It has these top-level messages:
+ ListZonesRequest
+ ListZonesResponse
+ GetClusterRequest
+ ListClustersRequest
+ ListClustersResponse
+ CreateClusterRequest
+ CreateClusterMetadata
+ UpdateClusterMetadata
+ DeleteClusterRequest
+ UndeleteClusterRequest
+ UndeleteClusterMetadata
+*/
+package google_bigtable_admin_cluster_v1
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_bigtable_admin_cluster_v11 "google.golang.org/cloud/bigtable/internal/cluster_data_proto"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Request message for BigtableClusterService.ListZones.
+type ListZonesRequest struct {
+ // The unique name of the project for which a list of supported zones is
+ // requested.
+ // Values are of the form projects/<project>
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *ListZonesRequest) Reset() { *m = ListZonesRequest{} }
+func (m *ListZonesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListZonesRequest) ProtoMessage() {}
+
+// Response message for BigtableClusterService.ListZones.
+type ListZonesResponse struct {
+ // The list of requested zones.
+ Zones []*google_bigtable_admin_cluster_v11.Zone `protobuf:"bytes,1,rep,name=zones" json:"zones,omitempty"`
+}
+
+func (m *ListZonesResponse) Reset() { *m = ListZonesResponse{} }
+func (m *ListZonesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListZonesResponse) ProtoMessage() {}
+
+func (m *ListZonesResponse) GetZones() []*google_bigtable_admin_cluster_v11.Zone {
+ if m != nil {
+ return m.Zones
+ }
+ return nil
+}
+
+// Request message for BigtableClusterService.GetCluster.
+type GetClusterRequest struct {
+ // The unique name of the requested cluster.
+ // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} }
+func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*GetClusterRequest) ProtoMessage() {}
+
+// Request message for BigtableClusterService.ListClusters.
+type ListClustersRequest struct {
+ // The unique name of the project for which a list of clusters is requested.
+ // Values are of the form projects/<project>
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} }
+func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }
+func (*ListClustersRequest) ProtoMessage() {}
+
+// Response message for BigtableClusterService.ListClusters.
+type ListClustersResponse struct {
+ // The list of requested Clusters.
+ Clusters []*google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"`
+ // The zones for which clusters could not be retrieved.
+ FailedZones []*google_bigtable_admin_cluster_v11.Zone `protobuf:"bytes,2,rep,name=failed_zones" json:"failed_zones,omitempty"`
+}
+
+func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} }
+func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }
+func (*ListClustersResponse) ProtoMessage() {}
+
+func (m *ListClustersResponse) GetClusters() []*google_bigtable_admin_cluster_v11.Cluster {
+ if m != nil {
+ return m.Clusters
+ }
+ return nil
+}
+
+func (m *ListClustersResponse) GetFailedZones() []*google_bigtable_admin_cluster_v11.Zone {
+ if m != nil {
+ return m.FailedZones
+ }
+ return nil
+}
+
+// Request message for BigtableClusterService.CreateCluster.
+type CreateClusterRequest struct {
+ // The unique name of the zone in which to create the cluster.
+ // Values are of the form projects/<project>/zones/<zone>
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // The id to be used when referring to the new cluster within its zone,
+ // e.g. just the "test-cluster" section of the full name
+ // "projects/<project>/zones/<zone>/clusters/test-cluster".
+ ClusterId string `protobuf:"bytes,2,opt,name=cluster_id" json:"cluster_id,omitempty"`
+ // The cluster to create.
+ // The "name", "delete_time", and "current_operation" fields must be left
+ // blank.
+ Cluster *google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"`
+}
+
+func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} }
+func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateClusterRequest) ProtoMessage() {}
+
+func (m *CreateClusterRequest) GetCluster() *google_bigtable_admin_cluster_v11.Cluster {
+ if m != nil {
+ return m.Cluster
+ }
+ return nil
+}
+
+// Metadata type for the operation returned by
+// BigtableClusterService.CreateCluster.
+type CreateClusterMetadata struct {
+ // The request which prompted the creation of this operation.
+ OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request" json:"original_request,omitempty"`
+}
+
+func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} }
+func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) }
+func (*CreateClusterMetadata) ProtoMessage() {}
+
+func (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest {
+ if m != nil {
+ return m.OriginalRequest
+ }
+ return nil
+}
+
+// Metadata type for the operation returned by
+// BigtableClusterService.UpdateCluster.
+type UpdateClusterMetadata struct {
+ // The request which prompted the creation of this operation.
+ OriginalRequest *google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,1,opt,name=original_request" json:"original_request,omitempty"`
+}
+
+func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} }
+func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) }
+func (*UpdateClusterMetadata) ProtoMessage() {}
+
+func (m *UpdateClusterMetadata) GetOriginalRequest() *google_bigtable_admin_cluster_v11.Cluster {
+ if m != nil {
+ return m.OriginalRequest
+ }
+ return nil
+}
+
+// Request message for BigtableClusterService.DeleteCluster.
+type DeleteClusterRequest struct {
+ // The unique name of the cluster to be deleted.
+ // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} }
+func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteClusterRequest) ProtoMessage() {}
+
+// Request message for BigtableClusterService.UndeleteCluster.
+type UndeleteClusterRequest struct {
+ // The unique name of the cluster to be un-deleted.
+ // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *UndeleteClusterRequest) Reset() { *m = UndeleteClusterRequest{} }
+func (m *UndeleteClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*UndeleteClusterRequest) ProtoMessage() {}
+
+// Metadata type for the operation returned by
+// BigtableClusterService.UndeleteCluster.
+type UndeleteClusterMetadata struct {
+}
+
+func (m *UndeleteClusterMetadata) Reset() { *m = UndeleteClusterMetadata{} }
+func (m *UndeleteClusterMetadata) String() string { return proto.CompactTextString(m) }
+func (*UndeleteClusterMetadata) ProtoMessage() {}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto new file mode 100644 index 000000000..2e5d4a72d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto @@ -0,0 +1,126 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.cluster.v1;
+
+import "google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto";
+
+option java_multiple_files = true;
+option java_outer_classname = "BigtableClusterServiceMessagesProto";
+option java_package = "com.google.bigtable.admin.cluster.v1";
+
+
+// Request message for BigtableClusterService.ListZones.
+message ListZonesRequest {
+ // The unique name of the project for which a list of supported zones is
+ // requested.
+ // Values are of the form projects/<project>
+ string name = 1;
+}
+
+// Response message for BigtableClusterService.ListZones.
+message ListZonesResponse {
+ // The list of requested zones.
+ repeated Zone zones = 1;
+}
+
+// Request message for BigtableClusterService.GetCluster.
+message GetClusterRequest {
+ // The unique name of the requested cluster.
+ // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
+ string name = 1;
+}
+
+// Request message for BigtableClusterService.ListClusters.
+message ListClustersRequest {
+ // The unique name of the project for which a list of clusters is requested.
+ // Values are of the form projects/<project>
+ string name = 1;
+}
+
+// Response message for BigtableClusterService.ListClusters.
+message ListClustersResponse {
+ // The list of requested Clusters.
+ repeated Cluster clusters = 1;
+
+ // The zones for which clusters could not be retrieved.
+ repeated Zone failed_zones = 2;
+}
+
+// Request message for BigtableClusterService.CreateCluster.
+message CreateClusterRequest {
+ // The unique name of the zone in which to create the cluster.
+ // Values are of the form projects/<project>/zones/<zone>
+ string name = 1;
+
+ // The id to be used when referring to the new cluster within its zone,
+ // e.g. just the "test-cluster" section of the full name
+ // "projects/<project>/zones/<zone>/clusters/test-cluster".
+ string cluster_id = 2;
+
+ // The cluster to create.
+ // The "name", "delete_time", and "current_operation" fields must be left
+ // blank.
+ Cluster cluster = 3;
+}
+
+// Metadata type for the operation returned by
+// BigtableClusterService.CreateCluster.
+message CreateClusterMetadata {
+ // The request which prompted the creation of this operation.
+ CreateClusterRequest original_request = 1;
+
+ // The time at which original_request was received.
+
+ // The time at which this operation failed or was completed successfully.
+}
+
+// Metadata type for the operation returned by
+// BigtableClusterService.UpdateCluster.
+message UpdateClusterMetadata {
+ // The request which prompted the creation of this operation.
+ Cluster original_request = 1;
+
+ // The time at which original_request was received.
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+
+ // The time at which this operation failed or was completed successfully.
+}
+
+// Request message for BigtableClusterService.DeleteCluster.
+message DeleteClusterRequest {
+ // The unique name of the cluster to be deleted.
+ // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
+ string name = 1;
+}
+
+// Request message for BigtableClusterService.UndeleteCluster.
+message UndeleteClusterRequest {
+ // The unique name of the cluster to be un-deleted.
+ // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
+ string name = 1;
+}
+
+// Metadata type for the operation returned by
+// BigtableClusterService.UndeleteCluster.
+message UndeleteClusterMetadata {
+ // The time at which the original request was received.
+
+ // The time at which this operation failed or was completed successfully.
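+
+ // Illustrative note, not part of the upstream proto: per the service
+ // comments in bigtable_cluster_service.proto, a cluster's embedded
+ // "current_operation" carries one of the *Metadata messages in this file
+ // (CreateClusterMetadata, UpdateClusterMetadata, or UndeleteClusterMetadata)
+ // in its "metadata" field, identifying the request that started it. A
+ // hypothetical fully-qualified cluster name under the formats above would
+ // read projects/my-project/zones/us-central1-b/clusters/my-cluster.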
+} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go new file mode 100644 index 000000000..49d3ab9c7 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go @@ -0,0 +1,1462 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto +// DO NOT EDIT! + +/* +Package google_bigtable_v1 is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto + +It has these top-level messages: + Row + Family + Column + Cell + RowRange + ColumnRange + TimestampRange + ValueRange + RowFilter + Mutation + ReadModifyWriteRule +*/ +package google_bigtable_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +type Row struct { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. + Families []*Family `protobuf:"bytes,2,rep,name=families" json:"families,omitempty"` +} + +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} + +func (m *Row) GetFamilies() []*Family { + if m != nil { + return m.Families + } + return nil +} + +// Specifies (some of) the contents of a single row/column family of a table. +type Family struct { + // The unique key which identifies this family within its row. This is the + // same key that's used to identify the family in, for example, a RowFilter + // which sets its "family_name_regex_filter" field. + // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may + // produce cells in a sentinel family with an empty name. + // Must be no greater than 64 characters in length. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Must not be empty. Sorted in order of increasing "qualifier". + Columns []*Column `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"` +} + +func (m *Family) Reset() { *m = Family{} } +func (m *Family) String() string { return proto.CompactTextString(m) } +func (*Family) ProtoMessage() {} + +func (m *Family) GetColumns() []*Column { + if m != nil { + return m.Columns + } + return nil +} + +// Specifies (some of) the contents of a single row/column of a table. +type Column struct { + // The unique key which identifies this column within its family. This is the + // same key that's used to identify the column in, for example, a RowFilter + // which sets its "column_qualifier_regex_filter" field. + // May contain any byte string, including the empty string, up to 16kiB in + // length. + Qualifier []byte `protobuf:"bytes,1,opt,name=qualifier,proto3" json:"qualifier,omitempty"` + // Must not be empty. 
Sorted in order of decreasing "timestamp_micros".
+ Cells []*Cell `protobuf:"bytes,2,rep,name=cells" json:"cells,omitempty"`
+}
+
+func (m *Column) Reset() { *m = Column{} }
+func (m *Column) String() string { return proto.CompactTextString(m) }
+func (*Column) ProtoMessage() {}
+
+func (m *Column) GetCells() []*Cell {
+ if m != nil {
+ return m.Cells
+ }
+ return nil
+}
+
+// Specifies (some of) the contents of a single row/column/timestamp of a table.
+type Cell struct {
+ // The cell's stored timestamp, which also uniquely identifies it within
+ // its column.
+ // Values are always expressed in microseconds, but individual tables may set
+ // a coarser "granularity" to further restrict the allowed values. For
+ // example, a table which specifies millisecond granularity will only allow
+ // values of "timestamp_micros" which are multiples of 1000.
+ TimestampMicros int64 `protobuf:"varint,1,opt,name=timestamp_micros" json:"timestamp_micros,omitempty"`
+ // The value stored in the cell.
+ // May contain any byte string, including the empty string, up to 100MiB in
+ // length.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
+ Labels []string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty"`
+}
+
+func (m *Cell) Reset() { *m = Cell{} }
+func (m *Cell) String() string { return proto.CompactTextString(m) }
+func (*Cell) ProtoMessage() {}
+
+// Specifies a contiguous range of rows.
+type RowRange struct {
+ // Inclusive lower bound. If left empty, interpreted as the empty string.
+ StartKey []byte `protobuf:"bytes,2,opt,name=start_key,proto3" json:"start_key,omitempty"`
+ // Exclusive upper bound. If left empty, interpreted as infinity.
+ EndKey []byte `protobuf:"bytes,3,opt,name=end_key,proto3" json:"end_key,omitempty"`
+}
+
+func (m *RowRange) Reset() { *m = RowRange{} }
+func (m *RowRange) String() string { return proto.CompactTextString(m) }
+func (*RowRange) ProtoMessage() {}
+
+// Specifies a contiguous range of columns within a single column family.
+// The range spans from <column family>:<start qualifier> to
+// <column family>:<end qualifier>, where both bounds can be either inclusive or
+// exclusive.
+type ColumnRange struct {
+ // The name of the column family within which this range falls.
+ FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"`
+ // The column qualifier at which to start the range (within 'column_family').
+ // If neither field is set, interpreted as the empty string, inclusive.
+ //
+ // Types that are valid to be assigned to StartQualifier:
+ // *ColumnRange_StartQualifierInclusive
+ // *ColumnRange_StartQualifierExclusive
+ StartQualifier isColumnRange_StartQualifier `protobuf_oneof:"start_qualifier"`
+ // The column qualifier at which to end the range (within 'column_family').
+ // If neither field is set, interpreted as the infinite string, exclusive.
+ // + // Types that are valid to be assigned to EndQualifier: + // *ColumnRange_EndQualifierInclusive + // *ColumnRange_EndQualifierExclusive + EndQualifier isColumnRange_EndQualifier `protobuf_oneof:"end_qualifier"` +} + +func (m *ColumnRange) Reset() { *m = ColumnRange{} } +func (m *ColumnRange) String() string { return proto.CompactTextString(m) } +func (*ColumnRange) ProtoMessage() {} + +type isColumnRange_StartQualifier interface { + isColumnRange_StartQualifier() +} +type isColumnRange_EndQualifier interface { + isColumnRange_EndQualifier() +} + +type ColumnRange_StartQualifierInclusive struct { + StartQualifierInclusive []byte `protobuf:"bytes,2,opt,name=start_qualifier_inclusive,proto3"` +} +type ColumnRange_StartQualifierExclusive struct { + StartQualifierExclusive []byte `protobuf:"bytes,3,opt,name=start_qualifier_exclusive,proto3"` +} +type ColumnRange_EndQualifierInclusive struct { + EndQualifierInclusive []byte `protobuf:"bytes,4,opt,name=end_qualifier_inclusive,proto3"` +} +type ColumnRange_EndQualifierExclusive struct { + EndQualifierExclusive []byte `protobuf:"bytes,5,opt,name=end_qualifier_exclusive,proto3"` +} + +func (*ColumnRange_StartQualifierInclusive) isColumnRange_StartQualifier() {} +func (*ColumnRange_StartQualifierExclusive) isColumnRange_StartQualifier() {} +func (*ColumnRange_EndQualifierInclusive) isColumnRange_EndQualifier() {} +func (*ColumnRange_EndQualifierExclusive) isColumnRange_EndQualifier() {} + +func (m *ColumnRange) GetStartQualifier() isColumnRange_StartQualifier { + if m != nil { + return m.StartQualifier + } + return nil +} +func (m *ColumnRange) GetEndQualifier() isColumnRange_EndQualifier { + if m != nil { + return m.EndQualifier + } + return nil +} + +func (m *ColumnRange) GetStartQualifierInclusive() []byte { + if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierInclusive); ok { + return x.StartQualifierInclusive + } + return nil +} + +func (m *ColumnRange) GetStartQualifierExclusive() []byte { + if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierExclusive); ok { + return x.StartQualifierExclusive + } + return nil +} + +func (m *ColumnRange) GetEndQualifierInclusive() []byte { + if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierInclusive); ok { + return x.EndQualifierInclusive + } + return nil +} + +func (m *ColumnRange) GetEndQualifierExclusive() []byte { + if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierExclusive); ok { + return x.EndQualifierExclusive + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ColumnRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) {
+ return _ColumnRange_OneofMarshaler, _ColumnRange_OneofUnmarshaler, []interface{}{
+ (*ColumnRange_StartQualifierInclusive)(nil),
+ (*ColumnRange_StartQualifierExclusive)(nil),
+ (*ColumnRange_EndQualifierInclusive)(nil),
+ (*ColumnRange_EndQualifierExclusive)(nil),
+ }
+}
+
+func _ColumnRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ColumnRange)
+ // start_qualifier
+ switch x := m.StartQualifier.(type) {
+ case *ColumnRange_StartQualifierInclusive:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.StartQualifierInclusive)
+ case *ColumnRange_StartQualifierExclusive:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.StartQualifierExclusive)
+ case nil:
+ default:
+ return fmt.Errorf("ColumnRange.StartQualifier has unexpected type %T", x)
+ }
+ // end_qualifier
+ switch x := m.EndQualifier.(type) {
+ case *ColumnRange_EndQualifierInclusive:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.EndQualifierInclusive)
+ case *ColumnRange_EndQualifierExclusive:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.EndQualifierExclusive)
+ case nil:
+ default:
+ return fmt.Errorf("ColumnRange.EndQualifier has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ColumnRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ColumnRange)
+ switch tag {
+ case 2: // start_qualifier.start_qualifier_inclusive
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.StartQualifier = &ColumnRange_StartQualifierInclusive{x}
+ return true, err
+ case 3: // start_qualifier.start_qualifier_exclusive
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.StartQualifier = &ColumnRange_StartQualifierExclusive{x}
+ return true, err
+ case 4: // end_qualifier.end_qualifier_inclusive
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.EndQualifier = &ColumnRange_EndQualifierInclusive{x}
+ return true, err
+ case 5: // end_qualifier.end_qualifier_exclusive
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.EndQualifier = &ColumnRange_EndQualifierExclusive{x}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+// Specifies a contiguous range of microsecond timestamps.
+type TimestampRange struct {
+ // Inclusive lower bound. If left empty, interpreted as 0.
+ StartTimestampMicros int64 `protobuf:"varint,1,opt,name=start_timestamp_micros" json:"start_timestamp_micros,omitempty"`
+ // Exclusive upper bound. If left empty, interpreted as infinity.
+ EndTimestampMicros int64 `protobuf:"varint,2,opt,name=end_timestamp_micros" json:"end_timestamp_micros,omitempty"`
+}
+
+func (m *TimestampRange) Reset() { *m = TimestampRange{} }
+func (m *TimestampRange) String() string { return proto.CompactTextString(m) }
+func (*TimestampRange) ProtoMessage() {}
+
+// Specifies a contiguous range of raw byte values.
+type ValueRange struct {
+ // The value at which to start the range.
+ // If neither field is set, interpreted as the empty string, inclusive.
+ // + // Types that are valid to be assigned to StartValue: + // *ValueRange_StartValueInclusive + // *ValueRange_StartValueExclusive + StartValue isValueRange_StartValue `protobuf_oneof:"start_value"` + // The value at which to end the range. + // If neither field is set, interpreted as the infinite string, exclusive. + // + // Types that are valid to be assigned to EndValue: + // *ValueRange_EndValueInclusive + // *ValueRange_EndValueExclusive + EndValue isValueRange_EndValue `protobuf_oneof:"end_value"` +} + +func (m *ValueRange) Reset() { *m = ValueRange{} } +func (m *ValueRange) String() string { return proto.CompactTextString(m) } +func (*ValueRange) ProtoMessage() {} + +type isValueRange_StartValue interface { + isValueRange_StartValue() +} +type isValueRange_EndValue interface { + isValueRange_EndValue() +} + +type ValueRange_StartValueInclusive struct { + StartValueInclusive []byte `protobuf:"bytes,1,opt,name=start_value_inclusive,proto3"` +} +type ValueRange_StartValueExclusive struct { + StartValueExclusive []byte `protobuf:"bytes,2,opt,name=start_value_exclusive,proto3"` +} +type ValueRange_EndValueInclusive struct { + EndValueInclusive []byte `protobuf:"bytes,3,opt,name=end_value_inclusive,proto3"` +} +type ValueRange_EndValueExclusive struct { + EndValueExclusive []byte `protobuf:"bytes,4,opt,name=end_value_exclusive,proto3"` +} + +func (*ValueRange_StartValueInclusive) isValueRange_StartValue() {} +func (*ValueRange_StartValueExclusive) isValueRange_StartValue() {} +func (*ValueRange_EndValueInclusive) isValueRange_EndValue() {} +func (*ValueRange_EndValueExclusive) isValueRange_EndValue() {} + +func (m *ValueRange) GetStartValue() isValueRange_StartValue { + if m != nil { + return m.StartValue + } + return nil +} +func (m *ValueRange) GetEndValue() isValueRange_EndValue { + if m != nil { + return m.EndValue + } + return nil +} + +func (m *ValueRange) GetStartValueInclusive() []byte { + if x, ok := m.GetStartValue().(*ValueRange_StartValueInclusive); ok { + return x.StartValueInclusive + } + return nil +} + +func (m *ValueRange) GetStartValueExclusive() []byte { + if x, ok := m.GetStartValue().(*ValueRange_StartValueExclusive); ok { + return x.StartValueExclusive + } + return nil +} + +func (m *ValueRange) GetEndValueInclusive() []byte { + if x, ok := m.GetEndValue().(*ValueRange_EndValueInclusive); ok { + return x.EndValueInclusive + } + return nil +} + +func (m *ValueRange) GetEndValueExclusive() []byte { + if x, ok := m.GetEndValue().(*ValueRange_EndValueExclusive); ok { + return x.EndValueExclusive + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ValueRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _ValueRange_OneofMarshaler, _ValueRange_OneofUnmarshaler, []interface{}{ + (*ValueRange_StartValueInclusive)(nil), + (*ValueRange_StartValueExclusive)(nil), + (*ValueRange_EndValueInclusive)(nil), + (*ValueRange_EndValueExclusive)(nil), + } +} + +func _ValueRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ValueRange) + // start_value + switch x := m.StartValue.(type) { + case *ValueRange_StartValueInclusive: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartValueInclusive) + case *ValueRange_StartValueExclusive: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartValueExclusive) + case nil: + default: + return fmt.Errorf("ValueRange.StartValue has unexpected type %T", x) + } + // end_value + switch x := m.EndValue.(type) { + case *ValueRange_EndValueInclusive: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndValueInclusive) + case *ValueRange_EndValueExclusive: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndValueExclusive) + case nil: + default: + return fmt.Errorf("ValueRange.EndValue has unexpected type %T", x) + } + return nil +} + +func _ValueRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ValueRange) + switch tag { + case 1: // start_value.start_value_inclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartValue = &ValueRange_StartValueInclusive{x} + return true, err + case 2: // start_value.start_value_exclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartValue = &ValueRange_StartValueExclusive{x} + return true, err + case 3: // end_value.end_value_inclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndValue = &ValueRange_EndValueInclusive{x} + return true, err + case 4: // end_value.end_value_exclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndValue = &ValueRange_EndValueExclusive{x} + return true, err + default: + return false, nil + } +} + +// Takes a row as input and produces an alternate view of the row based on +// specified rules. For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the "value_regex_filter", +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. 
An +// important point to keep in mind is that RE2(.) is equivalent by default to +// RE2([^\n]), meaning that it does not match newlines. When attempting to match +// an arbitrary byte, you should therefore use the escape sequence '\C', which +// may need to be further escaped as '\\C' in your client language. +// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the "strip_value_transformer", which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. +// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +type RowFilter struct { + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + // + // Types that are valid to be assigned to Filter: + // *RowFilter_Chain_ + // *RowFilter_Interleave_ + // *RowFilter_Condition_ + // *RowFilter_Sink + // *RowFilter_PassAllFilter + // *RowFilter_BlockAllFilter + // *RowFilter_RowKeyRegexFilter + // *RowFilter_RowSampleFilter + // *RowFilter_FamilyNameRegexFilter + // *RowFilter_ColumnQualifierRegexFilter + // *RowFilter_ColumnRangeFilter + // *RowFilter_TimestampRangeFilter + // *RowFilter_ValueRegexFilter + // *RowFilter_ValueRangeFilter + // *RowFilter_CellsPerRowOffsetFilter + // *RowFilter_CellsPerRowLimitFilter + // *RowFilter_CellsPerColumnLimitFilter + // *RowFilter_StripValueTransformer + // *RowFilter_ApplyLabelTransformer + Filter isRowFilter_Filter `protobuf_oneof:"filter"` +} + +func (m *RowFilter) Reset() { *m = RowFilter{} } +func (m *RowFilter) String() string { return proto.CompactTextString(m) } +func (*RowFilter) ProtoMessage() {} + +type isRowFilter_Filter interface { + isRowFilter_Filter() +} + +type RowFilter_Chain_ struct { + Chain *RowFilter_Chain `protobuf:"bytes,1,opt,name=chain"` +} +type RowFilter_Interleave_ struct { + Interleave *RowFilter_Interleave `protobuf:"bytes,2,opt,name=interleave"` +} +type RowFilter_Condition_ struct { + Condition *RowFilter_Condition `protobuf:"bytes,3,opt,name=condition"` +} +type RowFilter_Sink struct { + Sink bool `protobuf:"varint,16,opt,name=sink"` +} +type RowFilter_PassAllFilter struct { + PassAllFilter bool `protobuf:"varint,17,opt,name=pass_all_filter"` +} +type RowFilter_BlockAllFilter struct { + BlockAllFilter bool `protobuf:"varint,18,opt,name=block_all_filter"` +} +type RowFilter_RowKeyRegexFilter struct { + RowKeyRegexFilter []byte `protobuf:"bytes,4,opt,name=row_key_regex_filter,proto3"` +} +type RowFilter_RowSampleFilter struct { + RowSampleFilter float64 `protobuf:"fixed64,14,opt,name=row_sample_filter"` +} +type RowFilter_FamilyNameRegexFilter struct { + FamilyNameRegexFilter string `protobuf:"bytes,5,opt,name=family_name_regex_filter"` +} +type RowFilter_ColumnQualifierRegexFilter struct { + ColumnQualifierRegexFilter []byte `protobuf:"bytes,6,opt,name=column_qualifier_regex_filter,proto3"` +} +type RowFilter_ColumnRangeFilter struct { + ColumnRangeFilter *ColumnRange `protobuf:"bytes,7,opt,name=column_range_filter"` +} +type RowFilter_TimestampRangeFilter struct { + TimestampRangeFilter *TimestampRange `protobuf:"bytes,8,opt,name=timestamp_range_filter"` +} +type RowFilter_ValueRegexFilter struct { + 
ValueRegexFilter []byte `protobuf:"bytes,9,opt,name=value_regex_filter,proto3"` +} +type RowFilter_ValueRangeFilter struct { + ValueRangeFilter *ValueRange `protobuf:"bytes,15,opt,name=value_range_filter"` +} +type RowFilter_CellsPerRowOffsetFilter struct { + CellsPerRowOffsetFilter int32 `protobuf:"varint,10,opt,name=cells_per_row_offset_filter"` +} +type RowFilter_CellsPerRowLimitFilter struct { + CellsPerRowLimitFilter int32 `protobuf:"varint,11,opt,name=cells_per_row_limit_filter"` +} +type RowFilter_CellsPerColumnLimitFilter struct { + CellsPerColumnLimitFilter int32 `protobuf:"varint,12,opt,name=cells_per_column_limit_filter"` +} +type RowFilter_StripValueTransformer struct { + StripValueTransformer bool `protobuf:"varint,13,opt,name=strip_value_transformer"` +} +type RowFilter_ApplyLabelTransformer struct { + ApplyLabelTransformer string `protobuf:"bytes,19,opt,name=apply_label_transformer"` +} + +func (*RowFilter_Chain_) isRowFilter_Filter() {} +func (*RowFilter_Interleave_) isRowFilter_Filter() {} +func (*RowFilter_Condition_) isRowFilter_Filter() {} +func (*RowFilter_Sink) isRowFilter_Filter() {} +func (*RowFilter_PassAllFilter) isRowFilter_Filter() {} +func (*RowFilter_BlockAllFilter) isRowFilter_Filter() {} +func (*RowFilter_RowKeyRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_RowSampleFilter) isRowFilter_Filter() {} +func (*RowFilter_FamilyNameRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ColumnQualifierRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ColumnRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_TimestampRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_ValueRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ValueRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerRowOffsetFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerRowLimitFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerColumnLimitFilter) isRowFilter_Filter() {} +func (*RowFilter_StripValueTransformer) isRowFilter_Filter() {} +func (*RowFilter_ApplyLabelTransformer) isRowFilter_Filter() {} + +func (m *RowFilter) GetFilter() isRowFilter_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *RowFilter) GetChain() *RowFilter_Chain { + if x, ok := m.GetFilter().(*RowFilter_Chain_); ok { + return x.Chain + } + return nil +} + +func (m *RowFilter) GetInterleave() *RowFilter_Interleave { + if x, ok := m.GetFilter().(*RowFilter_Interleave_); ok { + return x.Interleave + } + return nil +} + +func (m *RowFilter) GetCondition() *RowFilter_Condition { + if x, ok := m.GetFilter().(*RowFilter_Condition_); ok { + return x.Condition + } + return nil +} + +func (m *RowFilter) GetSink() bool { + if x, ok := m.GetFilter().(*RowFilter_Sink); ok { + return x.Sink + } + return false +} + +func (m *RowFilter) GetPassAllFilter() bool { + if x, ok := m.GetFilter().(*RowFilter_PassAllFilter); ok { + return x.PassAllFilter + } + return false +} + +func (m *RowFilter) GetBlockAllFilter() bool { + if x, ok := m.GetFilter().(*RowFilter_BlockAllFilter); ok { + return x.BlockAllFilter + } + return false +} + +func (m *RowFilter) GetRowKeyRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_RowKeyRegexFilter); ok { + return x.RowKeyRegexFilter + } + return nil +} + +func (m *RowFilter) GetRowSampleFilter() float64 { + if x, ok := m.GetFilter().(*RowFilter_RowSampleFilter); ok { + return x.RowSampleFilter + } + return 0 +} + +func (m *RowFilter) GetFamilyNameRegexFilter() string { + if x, ok := m.GetFilter().(*RowFilter_FamilyNameRegexFilter); ok { 
+ return x.FamilyNameRegexFilter + } + return "" +} + +func (m *RowFilter) GetColumnQualifierRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_ColumnQualifierRegexFilter); ok { + return x.ColumnQualifierRegexFilter + } + return nil +} + +func (m *RowFilter) GetColumnRangeFilter() *ColumnRange { + if x, ok := m.GetFilter().(*RowFilter_ColumnRangeFilter); ok { + return x.ColumnRangeFilter + } + return nil +} + +func (m *RowFilter) GetTimestampRangeFilter() *TimestampRange { + if x, ok := m.GetFilter().(*RowFilter_TimestampRangeFilter); ok { + return x.TimestampRangeFilter + } + return nil +} + +func (m *RowFilter) GetValueRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_ValueRegexFilter); ok { + return x.ValueRegexFilter + } + return nil +} + +func (m *RowFilter) GetValueRangeFilter() *ValueRange { + if x, ok := m.GetFilter().(*RowFilter_ValueRangeFilter); ok { + return x.ValueRangeFilter + } + return nil +} + +func (m *RowFilter) GetCellsPerRowOffsetFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerRowOffsetFilter); ok { + return x.CellsPerRowOffsetFilter + } + return 0 +} + +func (m *RowFilter) GetCellsPerRowLimitFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerRowLimitFilter); ok { + return x.CellsPerRowLimitFilter + } + return 0 +} + +func (m *RowFilter) GetCellsPerColumnLimitFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerColumnLimitFilter); ok { + return x.CellsPerColumnLimitFilter + } + return 0 +} + +func (m *RowFilter) GetStripValueTransformer() bool { + if x, ok := m.GetFilter().(*RowFilter_StripValueTransformer); ok { + return x.StripValueTransformer + } + return false +} + +func (m *RowFilter) GetApplyLabelTransformer() string { + if x, ok := m.GetFilter().(*RowFilter_ApplyLabelTransformer); ok { + return x.ApplyLabelTransformer + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RowFilter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _RowFilter_OneofMarshaler, _RowFilter_OneofUnmarshaler, []interface{}{ + (*RowFilter_Chain_)(nil), + (*RowFilter_Interleave_)(nil), + (*RowFilter_Condition_)(nil), + (*RowFilter_Sink)(nil), + (*RowFilter_PassAllFilter)(nil), + (*RowFilter_BlockAllFilter)(nil), + (*RowFilter_RowKeyRegexFilter)(nil), + (*RowFilter_RowSampleFilter)(nil), + (*RowFilter_FamilyNameRegexFilter)(nil), + (*RowFilter_ColumnQualifierRegexFilter)(nil), + (*RowFilter_ColumnRangeFilter)(nil), + (*RowFilter_TimestampRangeFilter)(nil), + (*RowFilter_ValueRegexFilter)(nil), + (*RowFilter_ValueRangeFilter)(nil), + (*RowFilter_CellsPerRowOffsetFilter)(nil), + (*RowFilter_CellsPerRowLimitFilter)(nil), + (*RowFilter_CellsPerColumnLimitFilter)(nil), + (*RowFilter_StripValueTransformer)(nil), + (*RowFilter_ApplyLabelTransformer)(nil), + } +} + +func _RowFilter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RowFilter) + // filter + switch x := m.Filter.(type) { + case *RowFilter_Chain_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Chain); err != nil { + return err + } + case *RowFilter_Interleave_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Interleave); err != nil { + return err + } + case *RowFilter_Condition_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Condition); err != nil { + return err + } + case *RowFilter_Sink: + t := uint64(0) + if x.Sink { + t = 1 + } + b.EncodeVarint(16<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_PassAllFilter: + t := uint64(0) + if x.PassAllFilter { + t = 1 + } + b.EncodeVarint(17<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_BlockAllFilter: + t := uint64(0) + if x.BlockAllFilter { + t = 1 + } + b.EncodeVarint(18<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_RowKeyRegexFilter: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.RowKeyRegexFilter) + case *RowFilter_RowSampleFilter: + b.EncodeVarint(14<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.RowSampleFilter)) + case *RowFilter_FamilyNameRegexFilter: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FamilyNameRegexFilter) + case *RowFilter_ColumnQualifierRegexFilter: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ColumnQualifierRegexFilter) + case *RowFilter_ColumnRangeFilter: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ColumnRangeFilter); err != nil { + return err + } + case *RowFilter_TimestampRangeFilter: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampRangeFilter); err != nil { + return err + } + case *RowFilter_ValueRegexFilter: + b.EncodeVarint(9<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ValueRegexFilter) + case *RowFilter_ValueRangeFilter: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ValueRangeFilter); err != nil { + return err + } + case *RowFilter_CellsPerRowOffsetFilter: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerRowOffsetFilter)) + case *RowFilter_CellsPerRowLimitFilter: + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerRowLimitFilter)) + case *RowFilter_CellsPerColumnLimitFilter: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerColumnLimitFilter)) + case 
*RowFilter_StripValueTransformer: + t := uint64(0) + if x.StripValueTransformer { + t = 1 + } + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_ApplyLabelTransformer: + b.EncodeVarint(19<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ApplyLabelTransformer) + case nil: + default: + return fmt.Errorf("RowFilter.Filter has unexpected type %T", x) + } + return nil +} + +func _RowFilter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RowFilter) + switch tag { + case 1: // filter.chain + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Chain) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Chain_{msg} + return true, err + case 2: // filter.interleave + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Interleave) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Interleave_{msg} + return true, err + case 3: // filter.condition + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Condition) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Condition_{msg} + return true, err + case 16: // filter.sink + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_Sink{x != 0} + return true, err + case 17: // filter.pass_all_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_PassAllFilter{x != 0} + return true, err + case 18: // filter.block_all_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_BlockAllFilter{x != 0} + return true, err + case 4: // filter.row_key_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_RowKeyRegexFilter{x} + return true, err + case 14: // filter.row_sample_filter + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Filter = &RowFilter_RowSampleFilter{math.Float64frombits(x)} + return true, err + case 5: // filter.family_name_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &RowFilter_FamilyNameRegexFilter{x} + return true, err + case 6: // filter.column_qualifier_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_ColumnQualifierRegexFilter{x} + return true, err + case 7: // filter.column_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ColumnRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_ColumnRangeFilter{msg} + return true, err + case 8: // filter.timestamp_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TimestampRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_TimestampRangeFilter{msg} + return true, err + case 9: // filter.value_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_ValueRegexFilter{x} + return true, err + case 15: // filter.value_range_filter + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + msg := new(ValueRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_ValueRangeFilter{msg} + return true, err + case 10: // filter.cells_per_row_offset_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerRowOffsetFilter{int32(x)} + return true, err + case 11: // filter.cells_per_row_limit_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerRowLimitFilter{int32(x)} + return true, err + case 12: // filter.cells_per_column_limit_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerColumnLimitFilter{int32(x)} + return true, err + case 13: // filter.strip_value_transformer + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_StripValueTransformer{x != 0} + return true, err + case 19: // filter.apply_label_transformer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &RowFilter_ApplyLabelTransformer{x} + return true, err + default: + return false, nil + } +} + +// A RowFilter which sends rows through several RowFilters in sequence. +type RowFilter_Chain struct { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *RowFilter_Chain) Reset() { *m = RowFilter_Chain{} } +func (m *RowFilter_Chain) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Chain) ProtoMessage() {} + +func (m *RowFilter_Chain) GetFilters() []*RowFilter { + if m != nil { + return m.Filters + } + return nil +} + +// A RowFilter which sends each row to each of several component +// RowFilters and interleaves the results. +type RowFilter_Interleave struct { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // All interleaved filters are executed atomically. 
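+	//
+	// A hedged construction sketch (f0, f1, f2 stand for hypothetical
+	// sub-filters; RowFilter_Interleave_ is the generated oneof wrapper):
+	//
+	//	f := &RowFilter{Filter: &RowFilter_Interleave_{&RowFilter_Interleave{
+	//		Filters: []*RowFilter{f0, f1, f2},
+	//	}}}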
+ Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *RowFilter_Interleave) Reset() { *m = RowFilter_Interleave{} } +func (m *RowFilter_Interleave) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Interleave) ProtoMessage() {} + +func (m *RowFilter_Interleave) GetFilters() []*RowFilter { + if m != nil { + return m.Filters + } + return nil +} + +// A RowFilter which evaluates one of two possible RowFilters, depending on +// whether or not a predicate RowFilter outputs any cells from the input row. +// +// IMPORTANT NOTE: The predicate filter does not execute atomically with the +// true and false filters, which may lead to inconsistent or unexpected +// results. Additionally, Condition filters have poor performance, especially +// when filters are set for the false condition. +type RowFilter_Condition struct { + // If "predicate_filter" outputs any cells, then "true_filter" will be + // evaluated on the input row. Otherwise, "false_filter" will be evaluated. + PredicateFilter *RowFilter `protobuf:"bytes,1,opt,name=predicate_filter" json:"predicate_filter,omitempty"` + // The filter to apply to the input row if "predicate_filter" returns any + // results. If not provided, no results will be returned in the true case. + TrueFilter *RowFilter `protobuf:"bytes,2,opt,name=true_filter" json:"true_filter,omitempty"` + // The filter to apply to the input row if "predicate_filter" does not + // return any results. If not provided, no results will be returned in the + // false case. + FalseFilter *RowFilter `protobuf:"bytes,3,opt,name=false_filter" json:"false_filter,omitempty"` +} + +func (m *RowFilter_Condition) Reset() { *m = RowFilter_Condition{} } +func (m *RowFilter_Condition) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Condition) ProtoMessage() {} + +func (m *RowFilter_Condition) GetPredicateFilter() *RowFilter { + if m != nil { + return m.PredicateFilter + } + return nil +} + +func (m *RowFilter_Condition) GetTrueFilter() *RowFilter { + if m != nil { + return m.TrueFilter + } + return nil +} + +func (m *RowFilter_Condition) GetFalseFilter() *RowFilter { + if m != nil { + return m.FalseFilter + } + return nil +} + +// Specifies a particular change to be made to the contents of a row. +type Mutation struct { + // Which of the possible Mutation types to apply. 
+ // + // Types that are valid to be assigned to Mutation: + // *Mutation_SetCell_ + // *Mutation_DeleteFromColumn_ + // *Mutation_DeleteFromFamily_ + // *Mutation_DeleteFromRow_ + Mutation isMutation_Mutation `protobuf_oneof:"mutation"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} + +type isMutation_Mutation interface { + isMutation_Mutation() +} + +type Mutation_SetCell_ struct { + SetCell *Mutation_SetCell `protobuf:"bytes,1,opt,name=set_cell"` +} +type Mutation_DeleteFromColumn_ struct { + DeleteFromColumn *Mutation_DeleteFromColumn `protobuf:"bytes,2,opt,name=delete_from_column"` +} +type Mutation_DeleteFromFamily_ struct { + DeleteFromFamily *Mutation_DeleteFromFamily `protobuf:"bytes,3,opt,name=delete_from_family"` +} +type Mutation_DeleteFromRow_ struct { + DeleteFromRow *Mutation_DeleteFromRow `protobuf:"bytes,4,opt,name=delete_from_row"` +} + +func (*Mutation_SetCell_) isMutation_Mutation() {} +func (*Mutation_DeleteFromColumn_) isMutation_Mutation() {} +func (*Mutation_DeleteFromFamily_) isMutation_Mutation() {} +func (*Mutation_DeleteFromRow_) isMutation_Mutation() {} + +func (m *Mutation) GetMutation() isMutation_Mutation { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *Mutation) GetSetCell() *Mutation_SetCell { + if x, ok := m.GetMutation().(*Mutation_SetCell_); ok { + return x.SetCell + } + return nil +} + +func (m *Mutation) GetDeleteFromColumn() *Mutation_DeleteFromColumn { + if x, ok := m.GetMutation().(*Mutation_DeleteFromColumn_); ok { + return x.DeleteFromColumn + } + return nil +} + +func (m *Mutation) GetDeleteFromFamily() *Mutation_DeleteFromFamily { + if x, ok := m.GetMutation().(*Mutation_DeleteFromFamily_); ok { + return x.DeleteFromFamily + } + return nil +} + +func (m *Mutation) GetDeleteFromRow() *Mutation_DeleteFromRow { + if x, ok := m.GetMutation().(*Mutation_DeleteFromRow_); ok { + return x.DeleteFromRow + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
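+// Application code sets the oneof through its wrapper type rather than by
+// calling these functions. A minimal, hypothetical sketch (the family and
+// column names are illustrative only):
+//
+//	m := &Mutation{
+//		Mutation: &Mutation_SetCell_{&Mutation_SetCell{
+//			FamilyName:      "cf",
+//			ColumnQualifier: []byte("col"),
+//			TimestampMicros: -1, // -1 requests current Bigtable server time
+//			Value:           []byte("v"),
+//		}},
+//	}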
+func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, []interface{}{ + (*Mutation_SetCell_)(nil), + (*Mutation_DeleteFromColumn_)(nil), + (*Mutation_DeleteFromFamily_)(nil), + (*Mutation_DeleteFromRow_)(nil), + } +} + +func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Mutation) + // mutation + switch x := m.Mutation.(type) { + case *Mutation_SetCell_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SetCell); err != nil { + return err + } + case *Mutation_DeleteFromColumn_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromColumn); err != nil { + return err + } + case *Mutation_DeleteFromFamily_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromFamily); err != nil { + return err + } + case *Mutation_DeleteFromRow_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromRow); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Mutation.Mutation has unexpected type %T", x) + } + return nil +} + +func _Mutation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Mutation) + switch tag { + case 1: // mutation.set_cell + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_SetCell) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_SetCell_{msg} + return true, err + case 2: // mutation.delete_from_column + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromColumn) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromColumn_{msg} + return true, err + case 3: // mutation.delete_from_family + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromFamily) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromFamily_{msg} + return true, err + case 4: // mutation.delete_from_row + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromRow) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromRow_{msg} + return true, err + default: + return false, nil + } +} + +// A Mutation which sets the value of the specified cell. +type Mutation_SetCell struct { + // The name of the family into which new data should be written. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the "granularity" of the table (e.g. micros, millis). + TimestampMicros int64 `protobuf:"varint,3,opt,name=timestamp_micros" json:"timestamp_micros,omitempty"` + // The value to be written into the specified cell. 
+ Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Mutation_SetCell) Reset() { *m = Mutation_SetCell{} } +func (m *Mutation_SetCell) String() string { return proto.CompactTextString(m) } +func (*Mutation_SetCell) ProtoMessage() {} + +// A Mutation which deletes cells from the specified column, optionally +// restricting the deletions to a given timestamp range. +type Mutation_DeleteFromColumn struct { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` + // The range of timestamps within which cells should be deleted. + TimeRange *TimestampRange `protobuf:"bytes,3,opt,name=time_range" json:"time_range,omitempty"` +} + +func (m *Mutation_DeleteFromColumn) Reset() { *m = Mutation_DeleteFromColumn{} } +func (m *Mutation_DeleteFromColumn) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromColumn) ProtoMessage() {} + +func (m *Mutation_DeleteFromColumn) GetTimeRange() *TimestampRange { + if m != nil { + return m.TimeRange + } + return nil +} + +// A Mutation which deletes all cells from the specified column family. +type Mutation_DeleteFromFamily struct { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` +} + +func (m *Mutation_DeleteFromFamily) Reset() { *m = Mutation_DeleteFromFamily{} } +func (m *Mutation_DeleteFromFamily) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromFamily) ProtoMessage() {} + +// A Mutation which deletes all cells from the containing row. +type Mutation_DeleteFromRow struct { +} + +func (m *Mutation_DeleteFromRow) Reset() { *m = Mutation_DeleteFromRow{} } +func (m *Mutation_DeleteFromRow) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromRow) ProtoMessage() {} + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +type ReadModifyWriteRule struct { + // The name of the family to which the read/modify/write should be applied. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` + // The rule used to determine the column's new latest value from its current + // latest value. 
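+	//
+	// For illustration only (hypothetical family and column names), an
+	// atomic 64-bit counter increment could be expressed as:
+	//
+	//	rule := &ReadModifyWriteRule{
+	//		FamilyName:      "cf",
+	//		ColumnQualifier: []byte("counter"),
+	//		Rule:            &ReadModifyWriteRule_IncrementAmount{1},
+	//	}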
+ // + // Types that are valid to be assigned to Rule: + // *ReadModifyWriteRule_AppendValue + // *ReadModifyWriteRule_IncrementAmount + Rule isReadModifyWriteRule_Rule `protobuf_oneof:"rule"` +} + +func (m *ReadModifyWriteRule) Reset() { *m = ReadModifyWriteRule{} } +func (m *ReadModifyWriteRule) String() string { return proto.CompactTextString(m) } +func (*ReadModifyWriteRule) ProtoMessage() {} + +type isReadModifyWriteRule_Rule interface { + isReadModifyWriteRule_Rule() +} + +type ReadModifyWriteRule_AppendValue struct { + AppendValue []byte `protobuf:"bytes,3,opt,name=append_value,proto3"` +} +type ReadModifyWriteRule_IncrementAmount struct { + IncrementAmount int64 `protobuf:"varint,4,opt,name=increment_amount"` +} + +func (*ReadModifyWriteRule_AppendValue) isReadModifyWriteRule_Rule() {} +func (*ReadModifyWriteRule_IncrementAmount) isReadModifyWriteRule_Rule() {} + +func (m *ReadModifyWriteRule) GetRule() isReadModifyWriteRule_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (m *ReadModifyWriteRule) GetAppendValue() []byte { + if x, ok := m.GetRule().(*ReadModifyWriteRule_AppendValue); ok { + return x.AppendValue + } + return nil +} + +func (m *ReadModifyWriteRule) GetIncrementAmount() int64 { + if x, ok := m.GetRule().(*ReadModifyWriteRule_IncrementAmount); ok { + return x.IncrementAmount + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ReadModifyWriteRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _ReadModifyWriteRule_OneofMarshaler, _ReadModifyWriteRule_OneofUnmarshaler, []interface{}{ + (*ReadModifyWriteRule_AppendValue)(nil), + (*ReadModifyWriteRule_IncrementAmount)(nil), + } +} + +func _ReadModifyWriteRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadModifyWriteRule) + // rule + switch x := m.Rule.(type) { + case *ReadModifyWriteRule_AppendValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AppendValue) + case *ReadModifyWriteRule_IncrementAmount: + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IncrementAmount)) + case nil: + default: + return fmt.Errorf("ReadModifyWriteRule.Rule has unexpected type %T", x) + } + return nil +} + +func _ReadModifyWriteRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadModifyWriteRule) + switch tag { + case 3: // rule.append_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Rule = &ReadModifyWriteRule_AppendValue{x} + return true, err + case 4: // rule.increment_amount + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Rule = &ReadModifyWriteRule_IncrementAmount{int64(x)} + return true, err + default: + return false, nil + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto new file mode 100644 index 000000000..86234d22d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto @@ -0,0 +1,500 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.v1;
+
+option java_multiple_files = true;
+option java_outer_classname = "BigtableDataProto";
+option java_package = "com.google.bigtable.v1";
+
+
+// Specifies the complete (requested) contents of a single row of a table.
+// Rows which exceed 256MiB in size cannot be read in full.
+message Row {
+  // The unique key which identifies this row within its table. This is the same
+  // key that's used to identify the row in, for example, a MutateRowRequest.
+  // May contain any non-empty byte string up to 4KiB in length.
+  bytes key = 1;
+
+  // May be empty, but only if the entire row is empty.
+  // The mutual ordering of column families is not specified.
+  repeated Family families = 2;
+}
+
+// Specifies (some of) the contents of a single row/column family of a table.
+message Family {
+  // The unique key which identifies this family within its row. This is the
+  // same key that's used to identify the family in, for example, a RowFilter
+  // which sets its "family_name_regex_filter" field.
+  // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may
+  // produce cells in a sentinel family with an empty name.
+  // Must be no greater than 64 characters in length.
+  string name = 1;
+
+  // Must not be empty. Sorted in order of increasing "qualifier".
+  repeated Column columns = 2;
+}
+
+// Specifies (some of) the contents of a single row/column of a table.
+message Column {
+  // The unique key which identifies this column within its family. This is the
+  // same key that's used to identify the column in, for example, a RowFilter
+  // which sets its "column_qualifier_regex_filter" field.
+  // May contain any byte string, including the empty string, up to 16kiB in
+  // length.
+  bytes qualifier = 1;
+
+  // Must not be empty. Sorted in order of decreasing "timestamp_micros".
+  repeated Cell cells = 2;
+}
+
+// Specifies (some of) the contents of a single row/column/timestamp of a table.
+message Cell {
+  // The cell's stored timestamp, which also uniquely identifies it within
+  // its column.
+  // Values are always expressed in microseconds, but individual tables may set
+  // a coarser "granularity" to further restrict the allowed values. For
+  // example, a table which specifies millisecond granularity will only allow
+  // values of "timestamp_micros" which are multiples of 1000.
+  int64 timestamp_micros = 1;
+
+  // The value stored in the cell.
+  // May contain any byte string, including the empty string, up to 100MiB in
+  // length.
+  bytes value = 2;
+
+  // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
+  repeated string labels = 3;
+}
+
+// Specifies a contiguous range of rows.
+message RowRange {
+  // Inclusive lower bound. If left empty, interpreted as the empty string.
+  bytes start_key = 2;
+
+  // Exclusive upper bound. If left empty, interpreted as infinity.
+  bytes end_key = 3;
+}
+
+// Specifies a contiguous range of columns within a single column family.
+// The range spans from <column_family>:<start_qualifier> to
+// <column_family>:<end_qualifier>, where both bounds can be either inclusive or
+// exclusive.
+message ColumnRange {
+  // The name of the column family within which this range falls.
+  string family_name = 1;
+
+  // The column qualifier at which to start the range (within 'column_family').
+  // If neither field is set, interpreted as the empty string, inclusive.
+  oneof start_qualifier {
+    // Used when giving an inclusive lower bound for the range.
+    bytes start_qualifier_inclusive = 2;
+
+    // Used when giving an exclusive lower bound for the range.
+    bytes start_qualifier_exclusive = 3;
+  }
+
+  // The column qualifier at which to end the range (within 'column_family').
+  // If neither field is set, interpreted as the infinite string, exclusive.
+  oneof end_qualifier {
+    // Used when giving an inclusive upper bound for the range.
+    bytes end_qualifier_inclusive = 4;
+
+    // Used when giving an exclusive upper bound for the range.
+    bytes end_qualifier_exclusive = 5;
+  }
+}
+
+// Specifies a contiguous range of microsecond timestamps.
+message TimestampRange {
+  // Inclusive lower bound. If left empty, interpreted as 0.
+  int64 start_timestamp_micros = 1;
+
+  // Exclusive upper bound. If left empty, interpreted as infinity.
+  int64 end_timestamp_micros = 2;
+}
+
+// Specifies a contiguous range of raw byte values.
+message ValueRange {
+  // The value at which to start the range.
+  // If neither field is set, interpreted as the empty string, inclusive.
+  oneof start_value {
+    // Used when giving an inclusive lower bound for the range.
+    bytes start_value_inclusive = 1;
+
+    // Used when giving an exclusive lower bound for the range.
+    bytes start_value_exclusive = 2;
+  }
+
+  // The value at which to end the range.
+  // If neither field is set, interpreted as the infinite string, exclusive.
+  oneof end_value {
+    // Used when giving an inclusive upper bound for the range.
+    bytes end_value_inclusive = 3;
+
+    // Used when giving an exclusive upper bound for the range.
+    bytes end_value_exclusive = 4;
+  }
+}
+
+// Takes a row as input and produces an alternate view of the row based on
+// specified rules. For example, a RowFilter might trim down a row to include
+// just the cells from columns matching a given regular expression, or might
+// return all the cells of a row but not their values. More complicated filters
+// can be composed out of these components to express requests such as, "within
+// every column of a particular family, give just the two most recent cells
+// which are older than timestamp X."
+//
+// There are two broad categories of RowFilters (true filters and transformers),
+// as well as two ways to compose simple filters into more complex ones
+// (chains and interleaves). They work as follows:
+//
+// * True filters alter the input row by excluding some of its cells wholesale
+// from the output row. An example of a true filter is the "value_regex_filter",
+// which excludes cells whose values don't match the specified pattern. All
+// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
+// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
+// important point to keep in mind is that RE2(.) is equivalent by default to
+// RE2([^\n]), meaning that it does not match newlines. When attempting to match
+// an arbitrary byte, you should therefore use the escape sequence '\C', which
+// may need to be further escaped as '\\C' in your client language.
+//
+// * Transformers alter the input row by changing the values of some of its
+// cells in the output, without excluding them completely.
Currently, the only +// supported transformer is the "strip_value_transformer", which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. +// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +message RowFilter { + // A RowFilter which sends rows through several RowFilters in sequence. + message Chain { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which sends each row to each of several component + // RowFilters and interleaves the results. + message Interleave { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // All interleaved filters are executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which evaluates one of two possible RowFilters, depending on + // whether or not a predicate RowFilter outputs any cells from the input row. + // + // IMPORTANT NOTE: The predicate filter does not execute atomically with the + // true and false filters, which may lead to inconsistent or unexpected + // results. Additionally, Condition filters have poor performance, especially + // when filters are set for the false condition. + message Condition { + // If "predicate_filter" outputs any cells, then "true_filter" will be + // evaluated on the input row. Otherwise, "false_filter" will be evaluated. + RowFilter predicate_filter = 1; + + // The filter to apply to the input row if "predicate_filter" returns any + // results. If not provided, no results will be returned in the true case. + RowFilter true_filter = 2; + + // The filter to apply to the input row if "predicate_filter" does not + // return any results. If not provided, no results will be returned in the + // false case. + RowFilter false_filter = 3; + } + + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + oneof filter { + // Applies several RowFilters to the data in sequence, progressively + // narrowing the results. + Chain chain = 1; + + // Applies several RowFilters to the data in parallel and combines the + // results. + Interleave interleave = 2; + + // Applies one of two possible RowFilters to the data based on the output of + // a predicate RowFilter. + Condition condition = 3; + + // ADVANCED USE ONLY. 
+ // Hook for introspection into the RowFilter. Outputs all cells directly to + // the output of the read rather than to any parent filter. Consider the + // following example: + // + // Chain( + // FamilyRegex("A"), + // Interleave( + // All(), + // Chain(Label("foo"), Sink()) + // ), + // QualifierRegex("B") + // ) + // + // A,A,1,w + // A,B,2,x + // B,B,4,z + // | + // FamilyRegex("A") + // | + // A,A,1,w + // A,B,2,x + // | + // +------------+-------------+ + // | | + // All() Label(foo) + // | | + // A,A,1,w A,A,1,w,labels:[foo] + // A,B,2,x A,B,2,x,labels:[foo] + // | | + // | Sink() --------------+ + // | | | + // +------------+ x------+ A,A,1,w,labels:[foo] + // | A,B,2,x,labels:[foo] + // A,A,1,w | + // A,B,2,x | + // | | + // QualifierRegex("B") | + // | | + // A,B,2,x | + // | | + // +--------------------------------+ + // | + // A,A,1,w,labels:[foo] + // A,B,2,x,labels:[foo] // could be switched + // A,B,2,x // could be switched + // + // Despite being excluded by the qualifier filter, a copy of every cell + // that reaches the sink is present in the final result. + // + // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], + // duplicate cells are possible, and appear in an unspecified mutual order. + // In this case we have a duplicate with column "A:B" and timestamp 2, + // because one copy passed through the all filter while the other was + // passed through the label and sink. Note that one copy has label "foo", + // while the other does not. + // + // Cannot be used within the `predicate_filter`, `true_filter`, or + // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. + bool sink = 16; + + // Matches all cells, regardless of input. Functionally equivalent to + // leaving `filter` unset, but included for completeness. + bool pass_all_filter = 17; + + // Does not match any cells, regardless of input. Useful for temporarily + // disabling just part of a filter. + bool block_all_filter = 18; + + // Matches only cells from rows whose keys satisfy the given RE2 regex. In + // other words, passes through the entire row when the key matches, and + // otherwise produces an empty row. + // Note that, since row keys can contain arbitrary bytes, the '\C' escape + // sequence must be used if a true wildcard is desired. The '.' character + // will not match the new line character '\n', which may be present in a + // binary key. + bytes row_key_regex_filter = 4; + + // Matches all cells from a row with probability p, and matches no cells + // from the row with probability 1-p. + double row_sample_filter = 14; + + // Matches only cells from columns whose families satisfy the given RE2 + // regex. For technical reasons, the regex must not contain the ':' + // character, even if it is not being used as a literal. + // Note that, since column families cannot contain the new line character + // '\n', it is sufficient to use '.' as a full wildcard when matching + // column family names. + string family_name_regex_filter = 5; + + // Matches only cells from columns whose qualifiers satisfy the given RE2 + // regex. + // Note that, since column qualifiers can contain arbitrary bytes, the '\C' + // escape sequence must be used if a true wildcard is desired. The '.' + // character will not match the new line character '\n', which may be + // present in a binary qualifier. + bytes column_qualifier_regex_filter = 6; + + // Matches only cells from columns within the given range. 
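+    // For example (illustrative values), a range covering qualifiers
+    // ["col1", "col5") within family "cf" sets family_name to "cf",
+    // start_qualifier_inclusive to "col1", and end_qualifier_exclusive
+    // to "col5".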
+    ColumnRange column_range_filter = 7;
+
+    // Matches only cells with timestamps within the given range.
+    TimestampRange timestamp_range_filter = 8;
+
+    // Matches only cells with values that satisfy the given regular expression.
+    // Note that, since cell values can contain arbitrary bytes, the '\C' escape
+    // sequence must be used if a true wildcard is desired. The '.' character
+    // will not match the new line character '\n', which may be present in a
+    // binary value.
+    bytes value_regex_filter = 9;
+
+    // Matches only cells with values that fall within the given range.
+    ValueRange value_range_filter = 15;
+
+    // Skips the first N cells of each row, matching all subsequent cells.
+    int32 cells_per_row_offset_filter = 10;
+
+    // Matches only the first N cells of each row.
+    int32 cells_per_row_limit_filter = 11;
+
+    // Matches only the most recent N cells within each column. For example,
+    // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
+    // skip all earlier cells in "foo:bar", and then begin matching again in
+    // column "foo:bar2".
+    int32 cells_per_column_limit_filter = 12;
+
+    // Replaces each cell's value with the empty string.
+    bool strip_value_transformer = 13;
+
+    // Applies the given label to all cells in the output row. This allows
+    // the client to determine which results were produced from which part of
+    // the filter.
+    //
+    // Values must be at most 15 characters in length, and match the RE2
+    // pattern [a-z0-9\\-]+
+    //
+    // Due to a technical limitation, it is not currently possible to apply
+    // multiple labels to a cell. As a result, a Chain may have no more than
+    // one sub-filter which contains an apply_label_transformer. It is okay for
+    // an Interleave to contain multiple apply_label_transformers, as they will
+    // be applied to separate copies of the input. This may be relaxed in the
+    // future.
+    string apply_label_transformer = 19;
+  }
+}
+
+// Specifies a particular change to be made to the contents of a row.
+message Mutation {
+  // A Mutation which sets the value of the specified cell.
+  message SetCell {
+    // The name of the family into which new data should be written.
+    // Must match [-_.a-zA-Z0-9]+
+    string family_name = 1;
+
+    // The qualifier of the column into which new data should be written.
+    // Can be any byte string, including the empty string.
+    bytes column_qualifier = 2;
+
+    // The timestamp of the cell into which new data should be written.
+    // Use -1 for current Bigtable server time.
+    // Otherwise, the client should set this value itself, noting that the
+    // default value is a timestamp of zero if the field is left unspecified.
+    // Values must match the "granularity" of the table (e.g. micros, millis).
+    int64 timestamp_micros = 3;
+
+    // The value to be written into the specified cell.
+    bytes value = 4;
+  }
+
+  // A Mutation which deletes cells from the specified column, optionally
+  // restricting the deletions to a given timestamp range.
+  message DeleteFromColumn {
+    // The name of the family from which cells should be deleted.
+    // Must match [-_.a-zA-Z0-9]+
+    string family_name = 1;
+
+    // The qualifier of the column from which cells should be deleted.
+    // Can be any byte string, including the empty string.
+    bytes column_qualifier = 2;
+
+    // The range of timestamps within which cells should be deleted.
+    TimestampRange time_range = 3;
+  }
+
+  // A Mutation which deletes all cells from the specified column family.
+ message DeleteFromFamily { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + } + + // A Mutation which deletes all cells from the containing row. + message DeleteFromRow { + + } + + // Which of the possible Mutation types to apply. + oneof mutation { + // Set a cell's value. + SetCell set_cell = 1; + + // Deletes cells from a column. + DeleteFromColumn delete_from_column = 2; + + // Deletes cells from a column family. + DeleteFromFamily delete_from_family = 3; + + // Deletes cells from the entire row. + DeleteFromRow delete_from_row = 4; + } +} + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +message ReadModifyWriteRule { + // The name of the family to which the read/modify/write should be applied. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The rule used to determine the column's new latest value from its current + // latest value. + oneof rule { + // Rule specifying that "append_value" be appended to the existing value. + // If the targeted cell is unset, it will be treated as containing the + // empty string. + bytes append_value = 3; + + // Rule specifying that "increment_amount" be added to the existing value. + // If the targeted cell is unset, it will be treated as containing a zero. + // Otherwise, the targeted cell must contain an 8-byte value (interpreted + // as a 64-bit big-endian signed integer), or the entire request will fail. + int64 increment_amount = 4; + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.pb.go new file mode 100644 index 000000000..63d6727e3 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.pb.go @@ -0,0 +1,81 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto +// DO NOT EDIT! + +/* +Package google_protobuf is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto + +It has these top-level messages: + Duration +*/ +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset()         { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage()    {}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto
new file mode 100644
index 000000000..15e9d44d3
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto
@@ -0,0 +1,78 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option java_generate_equals_and_hash = true;
+option java_multiple_files = true;
+option java_outer_classname = "DurationProto";
+option java_package = "com.google.protobuf";
+
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+message Duration {
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive.
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field. For durations
+  // of one second or more, a non-zero value for the `nanos` field must be
+  // of the same sign as the `seconds` field. Must be from -999,999,999
+  // to +999,999,999 inclusive.
+  int32 nanos = 2;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go
new file mode 100644
index 000000000..89bb42348
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go
@@ -0,0 +1,38 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/cloud/bigtable/internal/empty/empty.proto
+// DO NOT EDIT!
+
+/*
+Package google_protobuf is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/cloud/bigtable/internal/empty/empty.proto
+
+It has these top-level messages:
+	Empty
+*/
+package google_protobuf
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+type Empty struct {
+}
+
+func (m *Empty) Reset()         { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()    {}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.proto
new file mode 100644
index 000000000..43b06e87b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.proto
@@ -0,0 +1,34 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.protobuf; + +option java_multiple_files = true; +option java_outer_classname = "EmptyProto"; +option java_package = "com.google.protobuf"; + + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +message Empty { + +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/regen.sh b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/regen.sh new file mode 100644 index 000000000..6cfa1688b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/regen.sh @@ -0,0 +1,76 @@ +#!/bin/bash -e +# +# This script rebuilds the generated code for the protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. + +PKG=google.golang.org/cloud/bigtable +UPSTREAM=https://github.com/GoogleCloudPlatform/cloud-bigtable-client +UPSTREAM_SUBDIR=bigtable-protos/src/main/proto + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-cbt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir +# Pass 1: build mapping from upstream filename to our filename. +declare -A filename_map +for f in $(cd $PKG && find internal -name '*.proto'); do + echo -n 1>&2 "looking for latest version of $f... " + up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f)) + echo 1>&2 $up + if [ $(echo $up | wc -w) != "1" ]; then + die "not exactly one match" + fi + filename_map[$up]=$f +done +# Pass 2: build sed script for fixing imports. +import_fixes=$tmpdir/fix_imports.sed +for up in "${!filename_map[@]}"; do + f=${filename_map[$up]} + echo >>$import_fixes "s,\"$up\",\"$PKG/$f\"," +done +cat $import_fixes | sed 's,^,### ,' 1>&2 +# Pass 3: copy files, making necessary adjustments. +for up in "${!filename_map[@]}"; do + f=${filename_map[$up]} + cat $tmpdir/$UPSTREAM_SUBDIR/$up | + # Adjust proto imports. + sed -f $import_fixes | + # Drop the UndeleteCluster RPC method. It returns a google.longrunning.Operation. + sed '/^ rpc UndeleteCluster(/,/^ }$/d' | + # Drop annotations, long-running operations and timestamps. They aren't supported (yet). 
+ sed '/"google\/longrunning\/operations.proto"/d' | + sed '/google.longrunning.Operation/d' | + sed '/"google\/protobuf\/timestamp.proto"/d' | + sed '/google\.protobuf\.Timestamp/d' | + sed '/"google\/api\/annotations.proto"/d' | + sed '/option.*google\.api\.http.*{.*};$/d' | + cat > $PKG/$f +done + +# Run protoc once per package. +for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=plugins=grpc:. $dir/*.proto +done +echo 1>&2 "All OK" diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go new file mode 100644 index 000000000..fb7faf875 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go @@ -0,0 +1,287 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto +// DO NOT EDIT! + +package google_bigtable_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_bigtable_v11 "google.golang.org/cloud/bigtable/internal/data_proto" +import google_protobuf "google.golang.org/cloud/bigtable/internal/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Client API for BigtableService service + +type BigtableServiceClient interface { + // Streams back the contents of all requested rows, optionally applying + // the same Reader filter to each. Depending on their size, rows may be + // broken up across multiple responses, but atomicity of each row will still + // be preserved. + ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by 'mutation'. + MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + // Mutates a row atomically based on the output of a predicate Reader filter. + CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) + // Modifies a row atomically, reading the latest existing timestamp/value from + // the specified columns and writing a new value at + // max(existing timestamp, current server time) based on pre-defined + // read/modify/write rules. Returns the new contents of all modified cells. 
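+	//
+	// A hedged usage sketch (assumes an established *grpc.ClientConn named
+	// conn and a populated *ReadModifyWriteRowRequest named req):
+	//
+	//	client := NewBigtableServiceClient(conn)
+	//	row, err := client.ReadModifyWriteRow(ctx, req)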
+ ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error) +} + +type bigtableServiceClient struct { + cc *grpc.ClientConn +} + +func NewBigtableServiceClient(cc *grpc.ClientConn) BigtableServiceClient { + return &bigtableServiceClient{cc} +} + +func (c *bigtableServiceClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[0], c.cc, "/google.bigtable.v1.BigtableService/ReadRows", opts...) + if err != nil { + return nil, err + } + x := &bigtableServiceReadRowsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BigtableService_ReadRowsClient interface { + Recv() (*ReadRowsResponse, error) + grpc.ClientStream +} + +type bigtableServiceReadRowsClient struct { + grpc.ClientStream +} + +func (x *bigtableServiceReadRowsClient) Recv() (*ReadRowsResponse, error) { + m := new(ReadRowsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *bigtableServiceClient) SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) { + stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[1], c.cc, "/google.bigtable.v1.BigtableService/SampleRowKeys", opts...) + if err != nil { + return nil, err + } + x := &bigtableServiceSampleRowKeysClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BigtableService_SampleRowKeysClient interface { + Recv() (*SampleRowKeysResponse, error) + grpc.ClientStream +} + +type bigtableServiceSampleRowKeysClient struct { + grpc.ClientStream +} + +func (x *bigtableServiceSampleRowKeysClient) Recv() (*SampleRowKeysResponse, error) { + m := new(SampleRowKeysResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableServiceClient) CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) { + out := new(CheckAndMutateRowResponse) + err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/CheckAndMutateRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableServiceClient) ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error) { + out := new(google_bigtable_v11.Row) + err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/ReadModifyWriteRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableService service + +type BigtableServiceServer interface { + // Streams back the contents of all requested rows, optionally applying + // the same Reader filter to each. 
Depending on their size, rows may be + // broken up across multiple responses, but atomicity of each row will still + // be preserved. + ReadRows(*ReadRowsRequest, BigtableService_ReadRowsServer) error + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + SampleRowKeys(*SampleRowKeysRequest, BigtableService_SampleRowKeysServer) error + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by 'mutation'. + MutateRow(context.Context, *MutateRowRequest) (*google_protobuf.Empty, error) + // Mutates a row atomically based on the output of a predicate Reader filter. + CheckAndMutateRow(context.Context, *CheckAndMutateRowRequest) (*CheckAndMutateRowResponse, error) + // Modifies a row atomically, reading the latest existing timestamp/value from + // the specified columns and writing a new value at + // max(existing timestamp, current server time) based on pre-defined + // read/modify/write rules. Returns the new contents of all modified cells. + ReadModifyWriteRow(context.Context, *ReadModifyWriteRowRequest) (*google_bigtable_v11.Row, error) +} + +func RegisterBigtableServiceServer(s *grpc.Server, srv BigtableServiceServer) { + s.RegisterService(&_BigtableService_serviceDesc, srv) +} + +func _BigtableService_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadRowsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServiceServer).ReadRows(m, &bigtableServiceReadRowsServer{stream}) +} + +type BigtableService_ReadRowsServer interface { + Send(*ReadRowsResponse) error + grpc.ServerStream +} + +type bigtableServiceReadRowsServer struct { + grpc.ServerStream +} + +func (x *bigtableServiceReadRowsServer) Send(m *ReadRowsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BigtableService_SampleRowKeys_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SampleRowKeysRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServiceServer).SampleRowKeys(m, &bigtableServiceSampleRowKeysServer{stream}) +} + +type BigtableService_SampleRowKeysServer interface { + Send(*SampleRowKeysResponse) error + grpc.ServerStream +} + +type bigtableServiceSampleRowKeysServer struct { + grpc.ServerStream +} + +func (x *bigtableServiceSampleRowKeysServer) Send(m *SampleRowKeysResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BigtableService_MutateRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(MutateRowRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableServiceServer).MutateRow(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableService_CheckAndMutateRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(CheckAndMutateRowRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableServiceServer).CheckAndMutateRow(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableService_ReadModifyWriteRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(ReadModifyWriteRowRequest) + if err := 
codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableServiceServer).ReadModifyWriteRow(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _BigtableService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.v1.BigtableService", + HandlerType: (*BigtableServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MutateRow", + Handler: _BigtableService_MutateRow_Handler, + }, + { + MethodName: "CheckAndMutateRow", + Handler: _BigtableService_CheckAndMutateRow_Handler, + }, + { + MethodName: "ReadModifyWriteRow", + Handler: _BigtableService_ReadModifyWriteRow_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ReadRows", + Handler: _BigtableService_ReadRows_Handler, + ServerStreams: true, + }, + { + StreamName: "SampleRowKeys", + Handler: _BigtableService_SampleRowKeys_Handler, + ServerStreams: true, + }, + }, +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto new file mode 100644 index 000000000..814940a18 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto @@ -0,0 +1,60 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v1; + +import "google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto"; +import "google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto"; +import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; + +option java_generic_services = true; +option java_multiple_files = true; +option java_outer_classname = "BigtableServicesProto"; +option java_package = "com.google.bigtable.v1"; + + +// Service for reading from and writing to existing Bigtables. +service BigtableService { + // Streams back the contents of all requested rows, optionally applying + // the same Reader filter to each. Depending on their size, rows may be + // broken up across multiple responses, but atomicity of each row will still + // be preserved. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + } + + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { + } + + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by 'mutation'. + rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { + } + + // Mutates a row atomically based on the output of a predicate Reader filter. 
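+  // If the filter yields at least one cell, "true_mutations" are applied;
+  // otherwise "false_mutations" are applied (see CheckAndMutateRowRequest
+  // in bigtable_service_messages.proto).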
+ rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { + } + + // Modifies a row atomically, reading the latest existing timestamp/value from + // the specified columns and writing a new value at + // max(existing timestamp, current server time) based on pre-defined + // read/modify/write rules. Returns the new contents of all modified cells. + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go new file mode 100644 index 000000000..99e4d4a77 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go @@ -0,0 +1,444 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto +// DO NOT EDIT! + +/* +Package google_bigtable_v1 is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto + google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto + +It has these top-level messages: + ReadRowsRequest + ReadRowsResponse + SampleRowKeysRequest + SampleRowKeysResponse + MutateRowRequest + CheckAndMutateRowRequest + CheckAndMutateRowResponse + ReadModifyWriteRowRequest +*/ +package google_bigtable_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_bigtable_v11 "google.golang.org/cloud/bigtable/internal/data_proto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request message for BigtableServer.ReadRows. +type ReadRowsRequest struct { + // The unique name of the table from which to read. + TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + // If neither row_key nor row_range is set, reads from all rows. + // + // Types that are valid to be assigned to Target: + // *ReadRowsRequest_RowKey + // *ReadRowsRequest_RowRange + Target isReadRowsRequest_Target `protobuf_oneof:"target"` + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entire table. + Filter *google_bigtable_v11.RowFilter `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // By default, rows are read sequentially, producing results which are + // guaranteed to arrive in increasing row order. Setting + // "allow_row_interleaving" to true allows multiple rows to be interleaved in + // the response stream, which increases throughput but breaks this guarantee, + // and may force the client to use more memory to buffer partially-received + // rows. Cannot be set to true when specifying "num_rows_limit". + AllowRowInterleaving bool `protobuf:"varint,6,opt,name=allow_row_interleaving" json:"allow_row_interleaving,omitempty"` + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + // Note that "allow_row_interleaving" cannot be set to true when this is set. 
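+	//
+	// As an illustrative sketch only (tableName is a hypothetical value;
+	// conn is assumed to be an already-dialed *grpc.ClientConn), a bounded
+	// scan could be issued and drained with the generated client:
+	//
+	//	client := NewBigtableServiceClient(conn)
+	//	stream, err := client.ReadRows(ctx, &ReadRowsRequest{
+	//		TableName:    tableName,
+	//		NumRowsLimit: 100,
+	//	})
+	//	if err != nil { /* handle */ }
+	//	for {
+	//		rsp, err := stream.Recv()
+	//		if err == io.EOF {
+	//			break
+	//		}
+	//		if err != nil { /* handle */ }
+	//		_ = rsp // process chunks; see ReadRowsResponse
+	//	}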
+ NumRowsLimit int64 `protobuf:"varint,7,opt,name=num_rows_limit" json:"num_rows_limit,omitempty"` +} + +func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} } +func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRowsRequest) ProtoMessage() {} + +type isReadRowsRequest_Target interface { + isReadRowsRequest_Target() +} + +type ReadRowsRequest_RowKey struct { + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3"` +} +type ReadRowsRequest_RowRange struct { + RowRange *google_bigtable_v11.RowRange `protobuf:"bytes,3,opt,name=row_range"` +} + +func (*ReadRowsRequest_RowKey) isReadRowsRequest_Target() {} +func (*ReadRowsRequest_RowRange) isReadRowsRequest_Target() {} + +func (m *ReadRowsRequest) GetTarget() isReadRowsRequest_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ReadRowsRequest) GetRowKey() []byte { + if x, ok := m.GetTarget().(*ReadRowsRequest_RowKey); ok { + return x.RowKey + } + return nil +} + +func (m *ReadRowsRequest) GetRowRange() *google_bigtable_v11.RowRange { + if x, ok := m.GetTarget().(*ReadRowsRequest_RowRange); ok { + return x.RowRange + } + return nil +} + +func (m *ReadRowsRequest) GetFilter() *google_bigtable_v11.RowFilter { + if m != nil { + return m.Filter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ReadRowsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _ReadRowsRequest_OneofMarshaler, _ReadRowsRequest_OneofUnmarshaler, []interface{}{ + (*ReadRowsRequest_RowKey)(nil), + (*ReadRowsRequest_RowRange)(nil), + } +} + +func _ReadRowsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadRowsRequest) + // target + switch x := m.Target.(type) { + case *ReadRowsRequest_RowKey: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.RowKey) + case *ReadRowsRequest_RowRange: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RowRange); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ReadRowsRequest.Target has unexpected type %T", x) + } + return nil +} + +func _ReadRowsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadRowsRequest) + switch tag { + case 2: // target.row_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Target = &ReadRowsRequest_RowKey{x} + return true, err + case 3: // target.row_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_bigtable_v11.RowRange) + err := b.DecodeMessage(msg) + m.Target = &ReadRowsRequest_RowRange{msg} + return true, err + default: + return false, nil + } +} + +// Response message for BigtableService.ReadRows. +type ReadRowsResponse struct { + // The key of the row for which we're receiving data. + // Results will be received in increasing row key order, unless + // "allow_row_interleaving" was specified in the request. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,proto3" json:"row_key,omitempty"` + // One or more chunks of the row specified by "row_key". 
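+	//
+	// A minimal reassembly sketch (illustrative only): buffer "row_contents"
+	// chunks, drop the buffer on "reset_row", and hand the buffer off on
+	// "commit_row" (handleRow is a hypothetical helper):
+	//
+	//	var fams []*google_bigtable_v11.Family
+	//	for _, c := range rsp.Chunks {
+	//		switch x := c.Chunk.(type) {
+	//		case *ReadRowsResponse_Chunk_RowContents:
+	//			fams = append(fams, x.RowContents)
+	//		case *ReadRowsResponse_Chunk_ResetRow:
+	//			fams = fams[:0]
+	//		case *ReadRowsResponse_Chunk_CommitRow:
+	//			handleRow(rsp.RowKey, fams)
+	//			fams = nil
+	//		}
+	//	}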
+ Chunks []*ReadRowsResponse_Chunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"` +} + +func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} } +func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) } +func (*ReadRowsResponse) ProtoMessage() {} + +func (m *ReadRowsResponse) GetChunks() []*ReadRowsResponse_Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +// Specifies a piece of a row's contents returned as part of the read +// response stream. +type ReadRowsResponse_Chunk struct { + // Types that are valid to be assigned to Chunk: + // *ReadRowsResponse_Chunk_RowContents + // *ReadRowsResponse_Chunk_ResetRow + // *ReadRowsResponse_Chunk_CommitRow + Chunk isReadRowsResponse_Chunk_Chunk `protobuf_oneof:"chunk"` +} + +func (m *ReadRowsResponse_Chunk) Reset() { *m = ReadRowsResponse_Chunk{} } +func (m *ReadRowsResponse_Chunk) String() string { return proto.CompactTextString(m) } +func (*ReadRowsResponse_Chunk) ProtoMessage() {} + +type isReadRowsResponse_Chunk_Chunk interface { + isReadRowsResponse_Chunk_Chunk() +} + +type ReadRowsResponse_Chunk_RowContents struct { + RowContents *google_bigtable_v11.Family `protobuf:"bytes,1,opt,name=row_contents"` +} +type ReadRowsResponse_Chunk_ResetRow struct { + ResetRow bool `protobuf:"varint,2,opt,name=reset_row"` +} +type ReadRowsResponse_Chunk_CommitRow struct { + CommitRow bool `protobuf:"varint,3,opt,name=commit_row"` +} + +func (*ReadRowsResponse_Chunk_RowContents) isReadRowsResponse_Chunk_Chunk() {} +func (*ReadRowsResponse_Chunk_ResetRow) isReadRowsResponse_Chunk_Chunk() {} +func (*ReadRowsResponse_Chunk_CommitRow) isReadRowsResponse_Chunk_Chunk() {} + +func (m *ReadRowsResponse_Chunk) GetChunk() isReadRowsResponse_Chunk_Chunk { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *ReadRowsResponse_Chunk) GetRowContents() *google_bigtable_v11.Family { + if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_RowContents); ok { + return x.RowContents + } + return nil +} + +func (m *ReadRowsResponse_Chunk) GetResetRow() bool { + if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_ResetRow); ok { + return x.ResetRow + } + return false +} + +func (m *ReadRowsResponse_Chunk) GetCommitRow() bool { + if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_CommitRow); ok { + return x.CommitRow + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ReadRowsResponse_Chunk) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _ReadRowsResponse_Chunk_OneofMarshaler, _ReadRowsResponse_Chunk_OneofUnmarshaler, []interface{}{ + (*ReadRowsResponse_Chunk_RowContents)(nil), + (*ReadRowsResponse_Chunk_ResetRow)(nil), + (*ReadRowsResponse_Chunk_CommitRow)(nil), + } +} + +func _ReadRowsResponse_Chunk_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadRowsResponse_Chunk) + // chunk + switch x := m.Chunk.(type) { + case *ReadRowsResponse_Chunk_RowContents: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RowContents); err != nil { + return err + } + case *ReadRowsResponse_Chunk_ResetRow: + t := uint64(0) + if x.ResetRow { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *ReadRowsResponse_Chunk_CommitRow: + t := uint64(0) + if x.CommitRow { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("ReadRowsResponse_Chunk.Chunk has unexpected type %T", x) + } + return nil +} + +func _ReadRowsResponse_Chunk_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadRowsResponse_Chunk) + switch tag { + case 1: // chunk.row_contents + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_bigtable_v11.Family) + err := b.DecodeMessage(msg) + m.Chunk = &ReadRowsResponse_Chunk_RowContents{msg} + return true, err + case 2: // chunk.reset_row + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Chunk = &ReadRowsResponse_Chunk_ResetRow{x != 0} + return true, err + case 3: // chunk.commit_row + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Chunk = &ReadRowsResponse_Chunk_CommitRow{x != 0} + return true, err + default: + return false, nil + } +} + +// Request message for BigtableService.SampleRowKeys. +type SampleRowKeysRequest struct { + // The unique name of the table from which to sample row keys. + TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` +} + +func (m *SampleRowKeysRequest) Reset() { *m = SampleRowKeysRequest{} } +func (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) } +func (*SampleRowKeysRequest) ProtoMessage() {} + +// Response message for BigtableService.SampleRowKeys. +type SampleRowKeysResponse struct { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. + // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,proto3" json:"row_key,omitempty"` + // Approximate total storage space used by all rows in the table which precede + // "row_key". Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // "offset_bytes" fields. 
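+	//
+	// Illustrative note: given two consecutive samples a and b, the rows
+	// between their keys occupy roughly
+	//
+	//	b.OffsetBytes - a.OffsetBytes
+	//
+	// bytes, which is what makes the samples useful for sizing splits.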
+ OffsetBytes int64 `protobuf:"varint,2,opt,name=offset_bytes" json:"offset_bytes,omitempty"` +} + +func (m *SampleRowKeysResponse) Reset() { *m = SampleRowKeysResponse{} } +func (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) } +func (*SampleRowKeysResponse) ProtoMessage() {} + +// Request message for BigtableService.MutateRow. +type MutateRowRequest struct { + // The unique name of the table to which the mutation should be applied. + TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + // The key of the row to which the mutation should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + Mutations []*google_bigtable_v11.Mutation `protobuf:"bytes,3,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *MutateRowRequest) Reset() { *m = MutateRowRequest{} } +func (m *MutateRowRequest) String() string { return proto.CompactTextString(m) } +func (*MutateRowRequest) ProtoMessage() {} + +func (m *MutateRowRequest) GetMutations() []*google_bigtable_v11.Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// Request message for BigtableService.CheckAndMutateRowRequest +type CheckAndMutateRowRequest struct { + // The unique name of the table to which the conditional mutation should be + // applied. + TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + // The key of the row to which the conditional mutation should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either "true_mutations" or + // "false_mutations" will be executed. If unset, checks that the row contains + // any values at all. + PredicateFilter *google_bigtable_v11.RowFilter `protobuf:"bytes,6,opt,name=predicate_filter" json:"predicate_filter,omitempty"` + // Changes to be atomically applied to the specified row if "predicate_filter" + // yields at least one cell when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "false_mutations" is empty, and at most + // 100000. + TrueMutations []*google_bigtable_v11.Mutation `protobuf:"bytes,4,rep,name=true_mutations" json:"true_mutations,omitempty"` + // Changes to be atomically applied to the specified row if "predicate_filter" + // does not yield any cells when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "true_mutations" is empty, and at most + // 100000. 
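+	//
+	// Illustrative sketch only (tableName, filter, and muts are hypothetical,
+	// pre-built values):
+	//
+	//	rsp, err := client.CheckAndMutateRow(ctx, &CheckAndMutateRowRequest{
+	//		TableName:       tableName,
+	//		RowKey:          []byte("row-1"),
+	//		PredicateFilter: filter, // *google_bigtable_v11.RowFilter
+	//		TrueMutations:   muts,   // applied only if the filter matches
+	//	})
+	//	if err == nil && rsp.PredicateMatched { /* filter yielded cells */ }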
+ FalseMutations []*google_bigtable_v11.Mutation `protobuf:"bytes,5,rep,name=false_mutations" json:"false_mutations,omitempty"` +} + +func (m *CheckAndMutateRowRequest) Reset() { *m = CheckAndMutateRowRequest{} } +func (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) } +func (*CheckAndMutateRowRequest) ProtoMessage() {} + +func (m *CheckAndMutateRowRequest) GetPredicateFilter() *google_bigtable_v11.RowFilter { + if m != nil { + return m.PredicateFilter + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetTrueMutations() []*google_bigtable_v11.Mutation { + if m != nil { + return m.TrueMutations + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetFalseMutations() []*google_bigtable_v11.Mutation { + if m != nil { + return m.FalseMutations + } + return nil +} + +// Response message for BigtableService.CheckAndMutateRowRequest. +type CheckAndMutateRowResponse struct { + // Whether or not the request's "predicate_filter" yielded any results for + // the specified row. + PredicateMatched bool `protobuf:"varint,1,opt,name=predicate_matched" json:"predicate_matched,omitempty"` +} + +func (m *CheckAndMutateRowResponse) Reset() { *m = CheckAndMutateRowResponse{} } +func (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) } +func (*CheckAndMutateRowResponse) ProtoMessage() {} + +// Request message for BigtableService.ReadModifyWriteRowRequest. +type ReadModifyWriteRowRequest struct { + // The unique name of the table to which the read/modify/write rules should be + // applied. + TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + // The key of the row to which the read/modify/write rules should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + Rules []*google_bigtable_v11.ReadModifyWriteRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"` +} + +func (m *ReadModifyWriteRowRequest) Reset() { *m = ReadModifyWriteRowRequest{} } +func (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*ReadModifyWriteRowRequest) ProtoMessage() {} + +func (m *ReadModifyWriteRowRequest) GetRules() []*google_bigtable_v11.ReadModifyWriteRule { + if m != nil { + return m.Rules + } + return nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto new file mode 100644 index 000000000..661310ad4 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto @@ -0,0 +1,177 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.bigtable.v1; + +import "google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableServiceMessagesProto"; +option java_package = "com.google.bigtable.v1"; + + +// Request message for BigtableServer.ReadRows. +message ReadRowsRequest { + // The unique name of the table from which to read. + string table_name = 1; + + // If neither row_key nor row_range is set, reads from all rows. + oneof target { + // The key of a single row from which to read. + bytes row_key = 2; + + // A range of rows from which to read. + RowRange row_range = 3; + } + + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entire table. + RowFilter filter = 5; + + // By default, rows are read sequentially, producing results which are + // guaranteed to arrive in increasing row order. Setting + // "allow_row_interleaving" to true allows multiple rows to be interleaved in + // the response stream, which increases throughput but breaks this guarantee, + // and may force the client to use more memory to buffer partially-received + // rows. Cannot be set to true when specifying "num_rows_limit". + bool allow_row_interleaving = 6; + + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + // Note that "allow_row_interleaving" cannot be set to true when this is set. + int64 num_rows_limit = 7; +} + +// Response message for BigtableService.ReadRows. +message ReadRowsResponse { + // Specifies a piece of a row's contents returned as part of the read + // response stream. + message Chunk { + oneof chunk { + // A subset of the data from a particular row. As long as no "reset_row" + // is received in between, multiple "row_contents" from the same row are + // from the same atomic view of that row, and will be received in the + // expected family/column/timestamp order. + Family row_contents = 1; + + // Indicates that the client should drop all previous chunks for + // "row_key", as it will be re-read from the beginning. + bool reset_row = 2; + + // Indicates that the client can safely process all previous chunks for + // "row_key", as its data has been fully read. + bool commit_row = 3; + } + } + + // The key of the row for which we're receiving data. + // Results will be received in increasing row key order, unless + // "allow_row_interleaving" was specified in the request. + bytes row_key = 1; + + // One or more chunks of the row specified by "row_key". + repeated Chunk chunks = 2; +} + +// Request message for BigtableService.SampleRowKeys. +message SampleRowKeysRequest { + // The unique name of the table from which to sample row keys. + string table_name = 1; +} + +// Response message for BigtableService.SampleRowKeys. +message SampleRowKeysResponse { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. + // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + bytes row_key = 1; + + // Approximate total storage space used by all rows in the table which precede + // "row_key". 
Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // "offset_bytes" fields. + int64 offset_bytes = 2; +} + +// Request message for BigtableService.MutateRow. +message MutateRowRequest { + // The unique name of the table to which the mutation should be applied. + string table_name = 1; + + // The key of the row to which the mutation should be applied. + bytes row_key = 2; + + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + repeated Mutation mutations = 3; +} + +// Request message for BigtableService.CheckAndMutateRowRequest +message CheckAndMutateRowRequest { + // The unique name of the table to which the conditional mutation should be + // applied. + string table_name = 1; + + // The key of the row to which the conditional mutation should be applied. + bytes row_key = 2; + + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either "true_mutations" or + // "false_mutations" will be executed. If unset, checks that the row contains + // any values at all. + RowFilter predicate_filter = 6; + + // Changes to be atomically applied to the specified row if "predicate_filter" + // yields at least one cell when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "false_mutations" is empty, and at most + // 100000. + repeated Mutation true_mutations = 4; + + // Changes to be atomically applied to the specified row if "predicate_filter" + // does not yield any cells when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "true_mutations" is empty, and at most + // 100000. + repeated Mutation false_mutations = 5; +} + +// Response message for BigtableService.CheckAndMutateRowRequest. +message CheckAndMutateRowResponse { + // Whether or not the request's "predicate_filter" yielded any results for + // the specified row. + bool predicate_matched = 1; +} + +// Request message for BigtableService.ReadModifyWriteRowRequest. +message ReadModifyWriteRowRequest { + // The unique name of the table to which the read/modify/write rules should be + // applied. + string table_name = 1; + + // The key of the row to which the read/modify/write rules should be applied. + bytes row_key = 2; + + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + repeated ReadModifyWriteRule rules = 3; +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go new file mode 100644 index 000000000..b3ee9afe6 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go @@ -0,0 +1,308 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto +// DO NOT EDIT! + +/* +Package google_bigtable_admin_table_v1 is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto + +It has these top-level messages: + Table + ColumnFamily + GcRule +*/ +package google_bigtable_admin_table_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "google.golang.org/cloud/bigtable/internal/duration_proto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Table_TimestampGranularity int32 + +const ( + Table_MILLIS Table_TimestampGranularity = 0 +) + +var Table_TimestampGranularity_name = map[int32]string{ + 0: "MILLIS", +} +var Table_TimestampGranularity_value = map[string]int32{ + "MILLIS": 0, +} + +func (x Table_TimestampGranularity) String() string { + return proto.EnumName(Table_TimestampGranularity_name, int32(x)) +} + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +type Table struct { + // A unique identifier of the form + // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The column families configured for this table, mapped by column family id. + ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // Cannot be changed once the table is created. + Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.table.v1.Table_TimestampGranularity" json:"granularity,omitempty"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} + +func (m *Table) GetColumnFamilies() map[string]*ColumnFamily { + if m != nil { + return m.ColumnFamilies + } + return nil +} + +// A set of columns within a table which share a common configuration. +type ColumnFamily struct { + // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ + // The last segment is the same as the "name" field in + // google.bigtable.v1.Family. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Garbage collection expression specified by the following grammar: + // GC = EXPR + // | "" ; + // EXPR = EXPR, "||", EXPR (* lowest precedence *) + // | EXPR, "&&", EXPR + // | "(", EXPR, ")" (* highest precedence *) + // | PROP ; + // PROP = "version() >", NUM32 + // | "age() >", NUM64, [ UNIT ] ; + // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) + // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) + // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) + // GC expressions can be up to 500 characters in length + // + // The different types of PROP are defined as follows: + // version() - cell index, counting from most recent and starting at 1 + // age() - age of the cell (current time minus cell timestamp) + // + // Example: "version() > 3 || (age() > 3d && version() > 1)" + // drop cells beyond the most recent three, and drop cells older than three + // days unless they're the most recent cell in the row/column + // + // Garbage collection executes opportunistically in the background, and so + // it's possible for reads to return a cell even if it matches the active GC + // expression for its family. + GcExpression string `protobuf:"bytes,2,opt,name=gc_expression" json:"gc_expression,omitempty"` + // Garbage collection rule specified as a protobuf. + // Supersedes `gc_expression`. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule *GcRule `protobuf:"bytes,3,opt,name=gc_rule" json:"gc_rule,omitempty"` +} + +func (m *ColumnFamily) Reset() { *m = ColumnFamily{} } +func (m *ColumnFamily) String() string { return proto.CompactTextString(m) } +func (*ColumnFamily) ProtoMessage() {} + +func (m *ColumnFamily) GetGcRule() *GcRule { + if m != nil { + return m.GcRule + } + return nil +} + +// Rule for determining which cells to delete during garbage collection. 
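+// As an illustrative sketch only, "keep at most five versions and nothing
+// older than 30 days" composes as a union (a cell is deleted if *any*
+// nested rule would delete it); Duration is assumed to expose a Seconds
+// field as in the standard google.protobuf.Duration:
+//
+//	rule := &GcRule{Rule: &GcRule_Union_{Union: &GcRule_Union{
+//		Rules: []*GcRule{
+//			{Rule: &GcRule_MaxNumVersions{MaxNumVersions: 5}},
+//			{Rule: &GcRule_MaxAge{MaxAge: &google_protobuf.Duration{Seconds: 30 * 24 * 3600}}},
+//		},
+//	}}}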
+type GcRule struct { + // Types that are valid to be assigned to Rule: + // *GcRule_MaxNumVersions + // *GcRule_MaxAge + // *GcRule_Intersection_ + // *GcRule_Union_ + Rule isGcRule_Rule `protobuf_oneof:"rule"` +} + +func (m *GcRule) Reset() { *m = GcRule{} } +func (m *GcRule) String() string { return proto.CompactTextString(m) } +func (*GcRule) ProtoMessage() {} + +type isGcRule_Rule interface { + isGcRule_Rule() +} + +type GcRule_MaxNumVersions struct { + MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions"` +} +type GcRule_MaxAge struct { + MaxAge *google_protobuf.Duration `protobuf:"bytes,2,opt,name=max_age"` +} +type GcRule_Intersection_ struct { + Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection"` +} +type GcRule_Union_ struct { + Union *GcRule_Union `protobuf:"bytes,4,opt,name=union"` +} + +func (*GcRule_MaxNumVersions) isGcRule_Rule() {} +func (*GcRule_MaxAge) isGcRule_Rule() {} +func (*GcRule_Intersection_) isGcRule_Rule() {} +func (*GcRule_Union_) isGcRule_Rule() {} + +func (m *GcRule) GetRule() isGcRule_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (m *GcRule) GetMaxNumVersions() int32 { + if x, ok := m.GetRule().(*GcRule_MaxNumVersions); ok { + return x.MaxNumVersions + } + return 0 +} + +func (m *GcRule) GetMaxAge() *google_protobuf.Duration { + if x, ok := m.GetRule().(*GcRule_MaxAge); ok { + return x.MaxAge + } + return nil +} + +func (m *GcRule) GetIntersection() *GcRule_Intersection { + if x, ok := m.GetRule().(*GcRule_Intersection_); ok { + return x.Intersection + } + return nil +} + +func (m *GcRule) GetUnion() *GcRule_Union { + if x, ok := m.GetRule().(*GcRule_Union_); ok { + return x.Union + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GcRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _GcRule_OneofMarshaler, _GcRule_OneofUnmarshaler, []interface{}{ + (*GcRule_MaxNumVersions)(nil), + (*GcRule_MaxAge)(nil), + (*GcRule_Intersection_)(nil), + (*GcRule_Union_)(nil), + } +} + +func _GcRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GcRule) + // rule + switch x := m.Rule.(type) { + case *GcRule_MaxNumVersions: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.MaxNumVersions)) + case *GcRule_MaxAge: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MaxAge); err != nil { + return err + } + case *GcRule_Intersection_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Intersection); err != nil { + return err + } + case *GcRule_Union_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Union); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GcRule.Rule has unexpected type %T", x) + } + return nil +} + +func _GcRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GcRule) + switch tag { + case 1: // rule.max_num_versions + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Rule = &GcRule_MaxNumVersions{int32(x)} + return true, err + case 2: // rule.max_age + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf.Duration) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_MaxAge{msg} + return true, err + case 3: // rule.intersection + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcRule_Intersection) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_Intersection_{msg} + return true, err + case 4: // rule.union + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcRule_Union) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_Union_{msg} + return true, err + default: + return false, nil + } +} + +// A GcRule which deletes cells matching all of the given rules. +type GcRule_Intersection struct { + // Only delete cells which would be deleted by every element of `rules`. + Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} } +func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) } +func (*GcRule_Intersection) ProtoMessage() {} + +func (m *GcRule_Intersection) GetRules() []*GcRule { + if m != nil { + return m.Rules + } + return nil +} + +// A GcRule which deletes cells matching any of the given rules. +type GcRule_Union struct { + // Delete cells which would be deleted by any element of `rules`. 
+ Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *GcRule_Union) Reset() { *m = GcRule_Union{} } +func (m *GcRule_Union) String() string { return proto.CompactTextString(m) } +func (*GcRule_Union) ProtoMessage() {} + +func (m *GcRule_Union) GetRules() []*GcRule { + if m != nil { + return m.Rules + } + return nil +} + +func init() { + proto.RegisterEnum("google.bigtable.admin.table.v1.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto new file mode 100644 index 000000000..a815152d1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto @@ -0,0 +1,123 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableTableDataProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +message Table { + enum TimestampGranularity { + MILLIS = 0; + } + + // A unique identifier of the form + // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* + string name = 1; + + // If this Table is in the process of being created, the Operation used to + // track its progress. As long as this operation is present, the Table will + // not accept any Table Admin or Read/Write requests. + + // The column families configured for this table, mapped by column family id. + map column_families = 3; + + // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // Cannot be changed once the table is created. + TimestampGranularity granularity = 4; +} + +// A set of columns within a table which share a common configuration. +message ColumnFamily { + // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ + // The last segment is the same as the "name" field in + // google.bigtable.v1.Family. 
+ string name = 1; + + // Garbage collection expression specified by the following grammar: + // GC = EXPR + // | "" ; + // EXPR = EXPR, "||", EXPR (* lowest precedence *) + // | EXPR, "&&", EXPR + // | "(", EXPR, ")" (* highest precedence *) + // | PROP ; + // PROP = "version() >", NUM32 + // | "age() >", NUM64, [ UNIT ] ; + // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) + // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) + // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) + // GC expressions can be up to 500 characters in length + // + // The different types of PROP are defined as follows: + // version() - cell index, counting from most recent and starting at 1 + // age() - age of the cell (current time minus cell timestamp) + // + // Example: "version() > 3 || (age() > 3d && version() > 1)" + // drop cells beyond the most recent three, and drop cells older than three + // days unless they're the most recent cell in the row/column + // + // Garbage collection executes opportunistically in the background, and so + // it's possible for reads to return a cell even if it matches the active GC + // expression for its family. + string gc_expression = 2; + + // Garbage collection rule specified as a protobuf. + // Supersedes `gc_expression`. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule gc_rule = 3; +} + +// Rule for determining which cells to delete during garbage collection. +message GcRule { + // A GcRule which deletes cells matching all of the given rules. + message Intersection { + // Only delete cells which would be deleted by every element of `rules`. + repeated GcRule rules = 1; + } + + // A GcRule which deletes cells matching any of the given rules. + message Union { + // Delete cells which would be deleted by any element of `rules`. + repeated GcRule rules = 1; + } + + oneof rule { + // Delete all cells in a column except the most recent N. + int32 max_num_versions = 1; + + // Delete cells in a column older than the given age. + // Values must be at least one millisecond, and will be truncated to + // microsecond granularity. + google.protobuf.Duration max_age = 2; + + // Delete cells that would be deleted by every nested rule. + Intersection intersection = 3; + + // Delete cells that would be deleted by any nested rule. + Union union = 4; + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go new file mode 100644 index 000000000..e0c67724a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto +// DO NOT EDIT! 
+ +package google_bigtable_admin_table_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_bigtable_admin_table_v11 "google.golang.org/cloud/bigtable/internal/table_data_proto" +import google_protobuf1 "google.golang.org/cloud/bigtable/internal/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Client API for BigtableTableService service + +type BigtableTableServiceClient interface { + // Creates a new table, to be served from a specified cluster. + // The table can be created with a full set of initial column families, + // specified in the request. + CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) + // Lists the names of all tables served from a specified cluster. + ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) + // Gets the schema of the specified table, including its column families. + GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) + // Permanently deletes a specified table and all of its data. + DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Changes the name of a specified table. + // Cannot be used to move tables between clusters, zones, or projects. + RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Creates a new column family within a specified table. + CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) + // Changes the configuration of a specified column family. + UpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) + // Permanently deletes a specified column family and all of its data. + DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) +} + +type bigtableTableServiceClient struct { + cc *grpc.ClientConn +} + +func NewBigtableTableServiceClient(cc *grpc.ClientConn) BigtableTableServiceClient { + return &bigtableTableServiceClient{cc} +} + +func (c *bigtableTableServiceClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) { + out := new(google_bigtable_admin_table_v11.Table) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) { + out := new(ListTablesResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/ListTables", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) { + out := new(google_bigtable_admin_table_v11.Table) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/GetTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/RenameTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) { + out := new(google_bigtable_admin_table_v11.ColumnFamily) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) UpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) { + out := new(google_bigtable_admin_table_v11.ColumnFamily) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableTableService service + +type BigtableTableServiceServer interface { + // Creates a new table, to be served from a specified cluster. + // The table can be created with a full set of initial column families, + // specified in the request. + CreateTable(context.Context, *CreateTableRequest) (*google_bigtable_admin_table_v11.Table, error) + // Lists the names of all tables served from a specified cluster. + ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) + // Gets the schema of the specified table, including its column families. + GetTable(context.Context, *GetTableRequest) (*google_bigtable_admin_table_v11.Table, error) + // Permanently deletes a specified table and all of its data. + DeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf1.Empty, error) + // Changes the name of a specified table. + // Cannot be used to move tables between clusters, zones, or projects. 
+ RenameTable(context.Context, *RenameTableRequest) (*google_protobuf1.Empty, error) + // Creates a new column family within a specified table. + CreateColumnFamily(context.Context, *CreateColumnFamilyRequest) (*google_bigtable_admin_table_v11.ColumnFamily, error) + // Changes the configuration of a specified column family. + UpdateColumnFamily(context.Context, *google_bigtable_admin_table_v11.ColumnFamily) (*google_bigtable_admin_table_v11.ColumnFamily, error) + // Permanently deletes a specified column family and all of its data. + DeleteColumnFamily(context.Context, *DeleteColumnFamilyRequest) (*google_protobuf1.Empty, error) +} + +func RegisterBigtableTableServiceServer(s *grpc.Server, srv BigtableTableServiceServer) { + s.RegisterService(&_BigtableTableService_serviceDesc, srv) +} + +func _BigtableTableService_CreateTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(CreateTableRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).CreateTable(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableTableService_ListTables_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(ListTablesRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).ListTables(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableTableService_GetTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(GetTableRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).GetTable(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableTableService_DeleteTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(DeleteTableRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).DeleteTable(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableTableService_RenameTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(RenameTableRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).RenameTable(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableTableService_CreateColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(CreateColumnFamilyRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).CreateColumnFamily(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableTableService_UpdateColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(google_bigtable_admin_table_v11.ColumnFamily) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).UpdateColumnFamily(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _BigtableTableService_DeleteColumnFamily_Handler(srv interface{}, ctx 
context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(DeleteColumnFamilyRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(BigtableTableServiceServer).DeleteColumnFamily(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _BigtableTableService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.admin.table.v1.BigtableTableService", + HandlerType: (*BigtableTableServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTable", + Handler: _BigtableTableService_CreateTable_Handler, + }, + { + MethodName: "ListTables", + Handler: _BigtableTableService_ListTables_Handler, + }, + { + MethodName: "GetTable", + Handler: _BigtableTableService_GetTable_Handler, + }, + { + MethodName: "DeleteTable", + Handler: _BigtableTableService_DeleteTable_Handler, + }, + { + MethodName: "RenameTable", + Handler: _BigtableTableService_RenameTable_Handler, + }, + { + MethodName: "CreateColumnFamily", + Handler: _BigtableTableService_CreateColumnFamily_Handler, + }, + { + MethodName: "UpdateColumnFamily", + Handler: _BigtableTableService_UpdateColumnFamily_Handler, + }, + { + MethodName: "DeleteColumnFamily", + Handler: _BigtableTableService_DeleteColumnFamily_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto new file mode 100644 index 000000000..1ccdfa24a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto @@ -0,0 +1,65 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto"; +import "google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto"; +import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableTableServicesProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// Provides access to the table schemas only, not the data stored within the tables. +service BigtableTableService { + // Creates a new table, to be served from a specified cluster. + // The table can be created with a full set of initial column families, + // specified in the request. + rpc CreateTable(CreateTableRequest) returns (Table) { + } + + // Lists the names of all tables served from a specified cluster. + rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + } + + // Gets the schema of the specified table, including its column families. 
+ rpc GetTable(GetTableRequest) returns (Table) { + } + + // Permanently deletes a specified table and all of its data. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + } + + // Changes the name of a specified table. + // Cannot be used to move tables between clusters, zones, or projects. + rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { + } + + // Creates a new column family within a specified table. + rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { + } + + // Changes the configuration of a specified column family. + rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { + } + + // Permanently deletes a specified column family and all of its data. + rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go new file mode 100644 index 000000000..268f4ea0a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto +// DO NOT EDIT! + +/* +Package google_bigtable_admin_table_v1 is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto + google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto + +It has these top-level messages: + CreateTableRequest + ListTablesRequest + ListTablesResponse + GetTableRequest + DeleteTableRequest + RenameTableRequest + CreateColumnFamilyRequest + DeleteColumnFamilyRequest +*/ +package google_bigtable_admin_table_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_bigtable_admin_table_v11 "google.golang.org/cloud/bigtable/internal/table_data_proto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type CreateTableRequest struct { + // The unique name of the cluster in which to create the new table. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name by which the new table should be referred to within the cluster, + // e.g. "foobar" rather than "/tables/foobar". + TableId string `protobuf:"bytes,2,opt,name=table_id" json:"table_id,omitempty"` + // The Table to create. The `name` field of the Table and all of its + // ColumnFamilies must be left blank, and will be populated in the response. + Table *google_bigtable_admin_table_v11.Table `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). + // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. 
+ // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. + InitialSplitKeys []string `protobuf:"bytes,4,rep,name=initial_split_keys" json:"initial_split_keys,omitempty"` +} + +func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } +func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTableRequest) ProtoMessage() {} + +func (m *CreateTableRequest) GetTable() *google_bigtable_admin_table_v11.Table { + if m != nil { + return m.Table + } + return nil +} + +type ListTablesRequest struct { + // The unique name of the cluster for which tables should be listed. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} } +func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTablesRequest) ProtoMessage() {} + +type ListTablesResponse struct { + // The tables present in the requested cluster. + // At present, only the names of the tables are populated. + Tables []*google_bigtable_admin_table_v11.Table `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` +} + +func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} } +func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTablesResponse) ProtoMessage() {} + +func (m *ListTablesResponse) GetTables() []*google_bigtable_admin_table_v11.Table { + if m != nil { + return m.Tables + } + return nil +} + +type GetTableRequest struct { + // The unique name of the requested table. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetTableRequest) Reset() { *m = GetTableRequest{} } +func (m *GetTableRequest) String() string { return proto.CompactTextString(m) } +func (*GetTableRequest) ProtoMessage() {} + +type DeleteTableRequest struct { + // The unique name of the table to be deleted. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } +func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTableRequest) ProtoMessage() {} + +type RenameTableRequest struct { + // The current unique name of the table. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The new name by which the table should be referred to within its containing + // cluster, e.g. "foobar" rather than "/tables/foobar". + NewId string `protobuf:"bytes,2,opt,name=new_id" json:"new_id,omitempty"` +} + +func (m *RenameTableRequest) Reset() { *m = RenameTableRequest{} } +func (m *RenameTableRequest) String() string { return proto.CompactTextString(m) } +func (*RenameTableRequest) ProtoMessage() {} + +type CreateColumnFamilyRequest struct { + // The unique name of the table in which to create the new column family. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name by which the new column family should be referred to within the + // table, e.g. "foobar" rather than "/columnFamilies/foobar". + ColumnFamilyId string `protobuf:"bytes,2,opt,name=column_family_id" json:"column_family_id,omitempty"` + // The column family to create. The `name` field must be left blank. 
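+	// Editorial sketch (not part of the generated file): constructing this
+	// request from the generated types; the table name below is hypothetical.
+	//
+	//	req := &CreateColumnFamilyRequest{
+	//		Name:           "<cluster>/tables/mytable",
+	//		ColumnFamilyId: "cf1",
+	//	}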
+ ColumnFamily *google_bigtable_admin_table_v11.ColumnFamily `protobuf:"bytes,3,opt,name=column_family" json:"column_family,omitempty"` +} + +func (m *CreateColumnFamilyRequest) Reset() { *m = CreateColumnFamilyRequest{} } +func (m *CreateColumnFamilyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateColumnFamilyRequest) ProtoMessage() {} + +func (m *CreateColumnFamilyRequest) GetColumnFamily() *google_bigtable_admin_table_v11.ColumnFamily { + if m != nil { + return m.ColumnFamily + } + return nil +} + +type DeleteColumnFamilyRequest struct { + // The unique name of the column family to be deleted. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteColumnFamilyRequest) Reset() { *m = DeleteColumnFamilyRequest{} } +func (m *DeleteColumnFamilyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteColumnFamilyRequest) ProtoMessage() {} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto new file mode 100644 index 000000000..9fa1b6a1c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto @@ -0,0 +1,101 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableTableServiceMessagesProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +message CreateTableRequest { + // The unique name of the cluster in which to create the new table. + string name = 1; + + // The name by which the new table should be referred to within the cluster, + // e.g. "foobar" rather than "/tables/foobar". + string table_id = 2; + + // The Table to create. The `name` field of the Table and all of its + // ColumnFamilies must be left blank, and will be populated in the response. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). + // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. + // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. 
+  repeated string initial_split_keys = 4;
+}
+
+message ListTablesRequest {
+  // The unique name of the cluster for which tables should be listed.
+  string name = 1;
+}
+
+message ListTablesResponse {
+  // The tables present in the requested cluster.
+  // At present, only the names of the tables are populated.
+  repeated Table tables = 1;
+}
+
+message GetTableRequest {
+  // The unique name of the requested table.
+  string name = 1;
+}
+
+message DeleteTableRequest {
+  // The unique name of the table to be deleted.
+  string name = 1;
+}
+
+message RenameTableRequest {
+  // The current unique name of the table.
+  string name = 1;
+
+  // The new name by which the table should be referred to within its containing
+  // cluster, e.g. "foobar" rather than "/tables/foobar".
+  string new_id = 2;
+}
+
+message CreateColumnFamilyRequest {
+  // The unique name of the table in which to create the new column family.
+  string name = 1;
+
+  // The name by which the new column family should be referred to within the
+  // table, e.g. "foobar" rather than "/columnFamilies/foobar".
+  string column_family_id = 2;
+
+  // The column family to create. The `name` field must be left blank.
+  ColumnFamily column_family = 3;
+}
+
+message DeleteColumnFamilyRequest {
+  // The unique name of the column family to be deleted.
+  string name = 1;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/cloud.go
new file mode 100644
index 000000000..a634b0552
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/cloud.go
@@ -0,0 +1,49 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cloud contains Google Cloud Platform API-related types
+// and common functions.
+package cloud
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"google.golang.org/cloud/internal"
+)
+
+// NewContext returns a new context that uses the provided http.Client.
+// The provided http.Client is responsible for authorizing and
+// authenticating the requests made to the Google Cloud APIs.
+// It mutates the client's original Transport to append the cloud
+// package's user-agent to the outgoing requests.
+// You can obtain the project ID from the Google Developers Console,
+// https://console.developers.google.com.
+func NewContext(projID string, c *http.Client) context.Context {
+	if c == nil {
+		panic("invalid nil *http.Client passed to NewContext")
+	}
+	return WithContext(context.Background(), projID, c)
+}
+
+// WithContext returns a new context in a similar way to NewContext,
+// but initializes the new context with the specified parent.
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+	// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
+	// Do User-Agent some other way.
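+	// (Editorial note: the check below wraps c.Transport at most once, so
+	// calling NewContext or WithContext repeatedly with the same client does
+	// not stack internal.Transport wrappers; TestClientTransportMutate in
+	// cloud_test.go exercises exactly this.)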
+ if _, ok := c.Transport.(*internal.Transport); !ok { + c.Transport = &internal.Transport{Base: c.Transport} + } + return internal.WithContext(parent, projID, c) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/cloud_test.go b/Godeps/_workspace/src/google.golang.org/cloud/cloud_test.go new file mode 100644 index 000000000..2561fb030 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/cloud_test.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud + +import ( + "net/http" + "testing" + + "google.golang.org/cloud/internal" +) + +func TestClientTransportMutate(t *testing.T) { + c := &http.Client{Transport: http.DefaultTransport} + NewContext("project-id", c) + NewContext("project-id", c) + + tr, ok := c.Transport.(*internal.Transport) + if !ok { + t.Errorf("Transport is expected to be an internal.Transport, found to be a %T", c.Transport) + } + if _, ok := tr.Base.(*internal.Transport); ok { + t.Errorf("Transport's Base shouldn't have been an internal.Transport, found to be a %T", tr.Base) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go new file mode 100644 index 000000000..b007cde63 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go @@ -0,0 +1,279 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. 
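+//
+// Editorial sketch (not part of the vendored file): typical use of this
+// package from a process that may or may not be running on GCE. Everything
+// referenced here is exported by this package; "my-attr" is a hypothetical
+// attribute key.
+//
+//	if metadata.OnGCE() {
+//		proj, _ := metadata.ProjectID()
+//		v, err := metadata.InstanceAttributeValue("my-attr")
+//		if _, ok := err.(metadata.NotDefinedError); ok {
+//			// The attribute is simply unset; not a transport failure.
+//		}
+//		_, _ = proj, v
+//	}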
+package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "strings" + "sync" + "time" + + "google.golang.org/cloud/internal" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 750 * time.Millisecond, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 750 * time.Millisecond, + }, + }, +} + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = "169.254.169.254" + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := metaClient.Do(req) + if err != nil { + return "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + return string(all), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var onGCE struct { + sync.Mutex + set bool + v bool +} + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + defer onGCE.Unlock() + onGCE.Lock() + if onGCE.set { + return onGCE.v + } + onGCE.set = true + + // We use the DNS name of the metadata service here instead of the IP address + // because we expect that to fail faster in the not-on-GCE case. 
+	res, err := metaClient.Get("http://metadata.google.internal")
+	if err != nil {
+		return false
+	}
+	onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
+	return onGCE.v
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/container/container.go b/Godeps/_workspace/src/google.golang.org/cloud/container/container.go
new file mode 100644
index 000000000..86987a5c1
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/container/container.go
@@ -0,0 +1,255 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package container contains a Google Container Engine client.
+//
+// For more information about the API,
+// see https://cloud.google.com/container-engine/docs
+package container
+
+import (
+	"errors"
+	"net/http"
+	"time"
+
+	"golang.org/x/net/context"
+	raw "google.golang.org/api/container/v1beta1"
+	"google.golang.org/cloud/internal"
+)
+
+type Type string
+
+var (
+	TypeCreate Type = Type("createCluster")
+	TypeDelete Type = Type("deleteCluster")
+)
+
+type Status string
+
+var (
+	Done         = Status("done")
+	Pending      = Status("pending")
+	Running      = Status("running")
+	Error        = Status("error")
+	Provisioning = Status("provisioning")
+	Stopping     = Status("stopping")
+)
+
+// Resource is a Google Container Engine cluster resource.
+type Resource struct {
+	// Name is the name of this cluster. The name must be unique
+	// within this project and zone, and can be up to 40 characters.
+	Name string
+
+	// Description is the description of the cluster. Optional.
+	Description string
+
+	// Zone is the Google Compute Engine zone in which the cluster resides.
+	Zone string
+
+	// Status is the current status of the cluster. It can be either
+	// Error, Provisioning, Running or Stopping.
+	Status Status
+
+	// Num is the number of nodes in this cluster resource.
+	Num int64
+
+	// APIVersion is the version of the Kubernetes master and kubelets running
+	// in this cluster. Allowed value is 0.4.2, or leave blank to
+	// pick up the latest stable release.
+	APIVersion string
+
+	// Endpoint is the IP address of this cluster's Kubernetes master.
+	// The endpoint can be accessed at https://username:password@endpoint/.
+	// See the Username and Password fields for the username and password information.
+	Endpoint string
+
+	// Username is the username to use when accessing the Kubernetes master endpoint.
+	Username string
+
+	// Password is the password to use when accessing the Kubernetes master endpoint.
+	Password string
+
+	// ContainerIPv4CIDR is the IP address range of the container pods in
+	// this cluster, in CIDR notation (e.g. 1.2.3.4/29).
+	ContainerIPv4CIDR string
+
+	// ServicesIPv4CIDR is the IP address range of the Kubernetes services in
+	// this cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are
+	// always in the 10.0.0.0/16 range.
+	ServicesIPv4CIDR string
+
+	// MachineType is a Google Compute Engine machine type (e.g. n1-standard-1).
+	// If none is set, the default type is used while creating a new cluster.
+	MachineType string
+
+	// SourceImage is the fully-specified name of a Google Compute Engine image.
+	// For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD.
+	SourceImage string
+
+	// Created is the creation time of this cluster.
+	Created time.Time
+}
+
+func resourceFromRaw(c *raw.Cluster) *Resource {
+	if c == nil {
+		return nil
+	}
+	r := &Resource{
+		Name:              c.Name,
+		Description:       c.Description,
+		Zone:              c.Zone,
+		Status:            Status(c.Status),
+		Num:               c.NumNodes,
+		APIVersion:        c.ClusterApiVersion,
+		Endpoint:          c.Endpoint,
+		Username:          c.MasterAuth.User,
+		Password:          c.MasterAuth.Password,
+		ContainerIPv4CIDR: c.ContainerIpv4Cidr,
+		ServicesIPv4CIDR:  c.ServicesIpv4Cidr,
+		MachineType:       c.NodeConfig.MachineType,
+		SourceImage:       c.NodeConfig.SourceImage,
+	}
+	r.Created, _ = time.Parse(time.RFC3339, c.CreationTimestamp)
+	return r
+}
+
+func resourcesFromRaw(c []*raw.Cluster) []*Resource {
+	r := make([]*Resource, len(c))
+	for i, val := range c {
+		r[i] = resourceFromRaw(val)
+	}
+	return r
+}
+
+// Op represents a Google Container Engine API operation.
+type Op struct {
+	// Name is the name of the operation.
+	Name string
+
+	// Zone is the Google Compute Engine zone.
+	Zone string
+
+	// TargetURL is the URL of the cluster resource
+	// that this operation is associated with.
+	TargetURL string
+
+	// Type is the operation type. It can be either TypeCreate or TypeDelete.
+	Type Type
+
+	// Status is the current status of this operation. It can be either
+	// Done or Pending.
+	Status Status
+}
+
+func opFromRaw(o *raw.Operation) *Op {
+	if o == nil {
+		return nil
+	}
+	return &Op{
+		Name:      o.Name,
+		Zone:      o.Zone,
+		TargetURL: o.Target,
+		Type:      Type(o.OperationType),
+		Status:    Status(o.Status),
+	}
+}
+
+func opsFromRaw(o []*raw.Operation) []*Op {
+	ops := make([]*Op, len(o))
+	for i, val := range o {
+		ops[i] = opFromRaw(val)
+	}
+	return ops
+}
+
+// Clusters returns a list of cluster resources from the specified zone.
+// If no zone is specified, it returns all clusters under the user's project.
+func Clusters(ctx context.Context, zone string) ([]*Resource, error) {
+	s := rawService(ctx)
+	if zone == "" {
+		resp, err := s.Projects.Clusters.List(internal.ProjID(ctx)).Do()
+		if err != nil {
+			return nil, err
+		}
+		return resourcesFromRaw(resp.Clusters), nil
+	}
+	resp, err := s.Projects.Zones.Clusters.List(internal.ProjID(ctx), zone).Do()
+	if err != nil {
+		return nil, err
+	}
+	return resourcesFromRaw(resp.Clusters), nil
+}
+
+// Cluster returns metadata about the specified cluster.
+func Cluster(ctx context.Context, zone, name string) (*Resource, error) {
+	s := rawService(ctx)
+	resp, err := s.Projects.Zones.Clusters.Get(internal.ProjID(ctx), zone, name).Do()
+	if err != nil {
+		return nil, err
+	}
+	return resourceFromRaw(resp), nil
+}
+
+// CreateCluster creates a new cluster with the provided metadata
+// in the specified zone.
+func CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) {
+	panic("not implemented")
+}
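+// Editorial sketch (not part of the vendored file): listing clusters with the
+// functions above. The project ID and the authenticated *http.Client are
+// hypothetical; the client must already carry OAuth2 credentials.
+//
+//	ctx := cloud.NewContext("project-id", authedClient)
+//	clusters, err := container.Clusters(ctx, "us-central1-b")
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, c := range clusters {
+//		fmt.Println(c.Name, c.Status)
+//	}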
+// DeleteCluster deletes a cluster.
+func DeleteCluster(ctx context.Context, zone, name string) error {
+	s := rawService(ctx)
+	_, err := s.Projects.Zones.Clusters.Delete(internal.ProjID(ctx), zone, name).Do()
+	return err
+}
+
+// Operations returns a list of operations from the specified zone.
+// If no zone is specified, it lists all operations running under the
+// user's project.
+func Operations(ctx context.Context, zone string) ([]*Op, error) {
+	s := rawService(ctx)
+	if zone == "" {
+		resp, err := s.Projects.Operations.List(internal.ProjID(ctx)).Do()
+		if err != nil {
+			return nil, err
+		}
+		return opsFromRaw(resp.Operations), nil
+	}
+	resp, err := s.Projects.Zones.Operations.List(internal.ProjID(ctx), zone).Do()
+	if err != nil {
+		return nil, err
+	}
+	return opsFromRaw(resp.Operations), nil
+}
+
+// Operation returns an operation.
+func Operation(ctx context.Context, zone, name string) (*Op, error) {
+	s := rawService(ctx)
+	resp, err := s.Projects.Zones.Operations.Get(internal.ProjID(ctx), zone, name).Do()
+	if err != nil {
+		return nil, err
+	}
+	if resp.ErrorMessage != "" {
+		return nil, errors.New(resp.ErrorMessage)
+	}
+	return opFromRaw(resp), nil
+}
+
+func rawService(ctx context.Context) *raw.Service {
+	return internal.Service(ctx, "container", func(hc *http.Client) interface{} {
+		svc, _ := raw.New(hc)
+		return svc
+	}).(*raw.Service)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore.go
new file mode 100644
index 000000000..964f01f91
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore.go
@@ -0,0 +1,496 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package datastore contains a Google Cloud Datastore client.
+//
+// This package is experimental and may make backwards-incompatible changes.
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/cloud"
+	pb "google.golang.org/cloud/internal/datastore"
+	"google.golang.org/cloud/internal/transport"
+)
+
+const prodAddr = "https://www.googleapis.com/datastore/v1beta2/datasets/"
+
+const userAgent = "gcloud-golang-datastore/20150727"
+
+const (
+	// ScopeDatastore grants permissions to view and/or manage datastore entities.
+	ScopeDatastore = "https://www.googleapis.com/auth/datastore"
+
+	// ScopeUserEmail grants permission to view the user's email address.
+	// It is required to access the datastore.
+	ScopeUserEmail = "https://www.googleapis.com/auth/userinfo.email"
+)
+
+// protoClient is an interface for *transport.ProtoClient to support injecting
+// fake clients in tests.
+type protoClient interface {
+	Call(context.Context, string, proto.Message, proto.Message) error
+}
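+// Editorial sketch (not part of the vendored file): constructing a client
+// with the defaults above. "project-id" is hypothetical, and the credential
+// options (cloud.ClientOption values) are assumed to be configured elsewhere.
+//
+//	client, err := datastore.NewClient(ctx, "project-id")
+//	if err != nil {
+//		// handle the dial error
+//	}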
+// Client is a client for reading and writing data in a datastore dataset.
+type Client struct {
+	client   protoClient
+	endpoint string
+	dataset  string // Called dataset by the datastore API, synonym for project ID.
+}
+
+// NewClient creates a new Client for a given dataset.
+func NewClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*Client, error) {
+	o := []cloud.ClientOption{
+		cloud.WithEndpoint(prodAddr),
+		cloud.WithScopes(ScopeDatastore, ScopeUserEmail),
+		cloud.WithUserAgent(userAgent),
+	}
+	o = append(o, opts...)
+	client, err := transport.NewProtoClient(ctx, o...)
+	if err != nil {
+		return nil, fmt.Errorf("dialing: %v", err)
+	}
+	return &Client{
+		client:  client,
+		dataset: projectID,
+	}, nil
+}
+
+var (
+	// ErrInvalidEntityType is returned when functions like Get or Next are
+	// passed a dst or src argument of invalid type.
+	ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+	// ErrInvalidKey is returned when an invalid key is presented.
+	ErrInvalidKey = errors.New("datastore: invalid key")
+	// ErrNoSuchEntity is returned when no entity was found for a given key.
+	ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+type multiArgType int
+
+const (
+	multiArgTypeInvalid multiArgType = iota
+	multiArgTypePropertyLoadSaver
+	multiArgTypeStruct
+	multiArgTypeStructPtr
+	multiArgTypeInterface
+)
+
+// nsKey is the type of the context.Context key to store the datastore
+// namespace.
+type nsKey struct{}
+
+// WithNamespace returns a new context that limits the scope of its parent
+// context with a Datastore namespace.
+func WithNamespace(parent context.Context, namespace string) context.Context {
+	return context.WithValue(parent, nsKey{}, namespace)
+}
+
+// ctxNamespace returns the active namespace for a context.
+// It defaults to "" if no namespace was specified.
+func ctxNamespace(ctx context.Context) string {
+	v, _ := ctx.Value(nsKey{}).(string)
+	return v
+}
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+	StructType reflect.Type
+	FieldName  string
+	Reason     string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+		e.FieldName, e.StructType, e.Reason)
+}
+
+func (c *Client) call(ctx context.Context, method string, req, resp proto.Message) error {
+	return c.client.Call(ctx, c.dataset+"/"+method, req, resp)
+}
+
+func keyToProto(k *Key) *pb.Key {
+	if k == nil {
+		return nil
+	}
+
+	// TODO(jbd): Eliminate unrequired allocations.
+	path := []*pb.Key_PathElement(nil)
+	for {
+		el := &pb.Key_PathElement{
+			Kind: proto.String(k.kind),
+		}
+		if k.id != 0 {
+			el.Id = proto.Int64(k.id)
+		}
+		if k.name != "" {
+			el.Name = proto.String(k.name)
+		}
+		path = append([]*pb.Key_PathElement{el}, path...)
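+		// (Editorial note: each element is prepended, so after the walk up
+		// the parent chain below, the path is ordered root-first, which is
+		// what the wire format expects.)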
+		if k.parent == nil {
+			break
+		}
+		k = k.parent
+	}
+	key := &pb.Key{
+		PathElement: path,
+	}
+	if k.namespace != "" {
+		key.PartitionId = &pb.PartitionId{
+			Namespace: proto.String(k.namespace),
+		}
+	}
+	return key
+}
+
+func protoToKey(p *pb.Key) *Key {
+	keys := make([]*Key, len(p.GetPathElement()))
+	for i, el := range p.GetPathElement() {
+		keys[i] = &Key{
+			namespace: p.GetPartitionId().GetNamespace(),
+			kind:      el.GetKind(),
+			id:        el.GetId(),
+			name:      el.GetName(),
+		}
+	}
+	for i := 0; i < len(keys)-1; i++ {
+		keys[i+1].parent = keys[i]
+	}
+	return keys[len(keys)-1]
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(keys []*Key) []*pb.Key {
+	ret := make([]*pb.Key, len(keys))
+	for i, k := range keys {
+		ret[i] = keyToProto(k)
+	}
+	return ret
+}
+
+// multiProtoToKey is a batch version of protoToKey.
+func multiProtoToKey(keys []*pb.Key) []*Key {
+	ret := make([]*Key, len(keys))
+	for i, k := range keys {
+		ret[i] = protoToKey(k)
+	}
+	return ret
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+	invalid := false
+	for _, k := range key {
+		if !k.valid() {
+			invalid = true
+			break
+		}
+	}
+	if !invalid {
+		return nil
+	}
+	err := make(MultiError, len(key))
+	for i, k := range key {
+		if !k.valid() {
+			err[i] = ErrInvalidKey
+		}
+	}
+	return err
+}
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+	if v.Kind() != reflect.Slice {
+		return multiArgTypeInvalid, nil
+	}
+	if v.Type() == typeOfPropertyList {
+		return multiArgTypeInvalid, nil
+	}
+	elemType = v.Type().Elem()
+	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+		return multiArgTypePropertyLoadSaver, elemType
+	}
+	switch elemType.Kind() {
+	case reflect.Struct:
+		return multiArgTypeStruct, elemType
+	case reflect.Interface:
+		return multiArgTypeInterface, elemType
+	case reflect.Ptr:
+		elemType = elemType.Elem()
+		if elemType.Kind() == reflect.Struct {
+			return multiArgTypeStructPtr, elemType
+		}
+	}
+	return multiArgTypeInvalid, nil
+}
+
+// Get loads the entity stored for key into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero-valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
+	err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
+	if me, ok := err.(MultiError); ok {
+		return me[0]
+	}
+	return err
+}
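+// Editorial sketch (not part of the vendored file): loading a single entity
+// with Get. The Post type and the key value are hypothetical.
+//
+//	type Post struct{ Title string }
+//	var p Post
+//	if err := client.Get(ctx, key, &p); err != nil {
+//		// ErrNoSuchEntity means the key has no stored entity.
+//	}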
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
+	return c.get(ctx, keys, dst, nil)
+}
+
+func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
+	v := reflect.ValueOf(dst)
+	multiArgType, _ := checkMultiArg(v)
+
+	// Sanity checks
+	if multiArgType == multiArgTypeInvalid {
+		return errors.New("datastore: dst has invalid type")
+	}
+	if len(keys) != v.Len() {
+		return errors.New("datastore: keys and dst slices have different length")
+	}
+	if len(keys) == 0 {
+		return nil
+	}
+
+	// Go through the keys, validate them, serialize them, and build a map
+	// from each key to its original index.
+	multiErr, any := make(MultiError, len(keys)), false
+	keyMap := make(map[string]int)
+	pbKeys := make([]*pb.Key, len(keys))
+	for i, k := range keys {
+		if !k.valid() {
+			multiErr[i] = ErrInvalidKey
+			any = true
+		} else {
+			keyMap[k.String()] = i
+			pbKeys[i] = keyToProto(k)
+		}
+	}
+	if any {
+		return multiErr
+	}
+	req := &pb.LookupRequest{
+		Key:         pbKeys,
+		ReadOptions: opts,
+	}
+	resp := &pb.LookupResponse{}
+	if err := c.call(ctx, "lookup", req, resp); err != nil {
+		return err
+	}
+	if len(resp.Deferred) > 0 {
+		// TODO(jbd): Assess whether we should retry the deferred keys.
+		return errors.New("datastore: some entities temporarily unavailable")
+	}
+	if len(keys) != len(resp.Found)+len(resp.Missing) {
+		return errors.New("datastore: internal error: server returned the wrong number of entities")
+	}
+	for _, e := range resp.Found {
+		k := protoToKey(e.Entity.Key)
+		index := keyMap[k.String()]
+		elem := v.Index(index)
+		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+			elem = elem.Addr()
+		}
+		err := loadEntity(elem.Interface(), e.Entity)
+		if err != nil {
+			multiErr[index] = err
+			any = true
+		}
+	}
+	for _, e := range resp.Missing {
+		k := protoToKey(e.Entity.Key)
+		multiErr[keyMap[k.String()]] = ErrNoSuchEntity
+		any = true
+	}
+	if any {
+		return multiErr
+	}
+	return nil
+}
+
+// Put saves the entity src into the datastore with the given key. src must be
+// a struct pointer or implement PropertyLoadSaver; if a struct pointer then
+// any unexported fields of that struct will be skipped. If the key is
+// incomplete, the returned key will be a unique key generated by the
+// datastore.
+func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
+	k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src})
+	if err != nil {
+		if me, ok := err.(MultiError); ok {
+			return nil, me[0]
+		}
+		return nil, err
+	}
+	return k[0], nil
+}
+
+// PutMulti is a batch version of Put.
+//
+// src must satisfy the same conditions as the dst argument to GetMulti.
+func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
+	mutation, err := putMutation(keys, src)
+	if err != nil {
+		return nil, err
+	}
+
+	// Make the request.
+ req := &pb.CommitRequest{ + Mutation: mutation, + Mode: pb.CommitRequest_NON_TRANSACTIONAL.Enum(), + } + resp := &pb.CommitResponse{} + if err := c.call(ctx, "commit", req, resp); err != nil { + return nil, err + } + + // Copy any newly minted keys into the returned keys. + newKeys := make(map[int]int) // Map of index in returned slice to index in response. + ret := make([]*Key, len(keys)) + var idx int + for i, key := range keys { + if key.Incomplete() { + // This key will be in the mutation result. + newKeys[i] = idx + idx++ + } else { + ret[i] = key + } + } + if len(newKeys) != len(resp.MutationResult.InsertAutoIdKey) { + return nil, errors.New("datastore: internal error: server returned the wrong number of keys") + } + for retI, respI := range newKeys { + ret[retI] = protoToKey(resp.MutationResult.InsertAutoIdKey[respI]) + } + return ret, nil +} + +func putMutation(keys []*Key, src interface{}) (*pb.Mutation, error) { + v := reflect.ValueOf(src) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return nil, errors.New("datastore: src has invalid type") + } + if len(keys) != v.Len() { + return nil, errors.New("datastore: key and src slices have different length") + } + if len(keys) == 0 { + return nil, nil + } + if err := multiValid(keys); err != nil { + return nil, err + } + var upsert, insert []*pb.Entity + for i, k := range keys { + val := reflect.ValueOf(src).Index(i) + // If src is an interface slice []interface{}{ent1, ent2} + if val.Kind() == reflect.Interface && val.Elem().Kind() == reflect.Slice { + val = val.Elem() + } + // If src is a slice of ptrs []*T{ent1, ent2} + if val.Kind() == reflect.Ptr && val.Elem().Kind() == reflect.Slice { + val = val.Elem() + } + p, err := saveEntity(k, val.Interface()) + if err != nil { + return nil, fmt.Errorf("datastore: Error while saving %v: %v", k.String(), err) + } + if k.Incomplete() { + insert = append(insert, p) + } else { + upsert = append(upsert, p) + } + } + + return &pb.Mutation{ + InsertAutoId: insert, + Upsert: upsert, + }, nil +} + +// Delete deletes the entity for the given key. +func (c *Client) Delete(ctx context.Context, key *Key) error { + err := c.DeleteMulti(ctx, []*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error { + mutation, err := deleteMutation(keys) + if err != nil { + return err + } + + req := &pb.CommitRequest{ + Mutation: mutation, + Mode: pb.CommitRequest_NON_TRANSACTIONAL.Enum(), + } + resp := &pb.CommitResponse{} + return c.call(ctx, "commit", req, resp) +} + +func deleteMutation(keys []*Key) (*pb.Mutation, error) { + protoKeys := make([]*pb.Key, len(keys)) + for i, k := range keys { + if k.Incomplete() { + return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) + } + protoKeys[i] = keyToProto(k) + } + + return &pb.Mutation{ + Delete: protoKeys, + }, nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore_test.go new file mode 100644 index 000000000..86896bf0f --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore_test.go @@ -0,0 +1,1359 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" +) + +type ( + myBlob []byte + myByte byte + myString string +) + +func makeMyByteSlice(n int) []myByte { + b := make([]myByte, n) + for i := range b { + b[i] = myByte(i) + } + return b +} + +func makeInt8Slice(n int) []int8 { + b := make([]int8, n) + for i := range b { + b[i] = int8(i) + } + return b +} + +func makeUint8Slice(n int) []uint8 { + b := make([]uint8, n) + for i := range b { + b[i] = uint8(i) + } + return b +} + +func newKey(stringID string, parent *Key) *Key { + return &Key{ + kind: "kind", + name: stringID, + id: 0, + parent: parent, + } +} + +var ( + testKey0 = newKey("name0", nil) + testKey1a = newKey("name1", nil) + testKey1b = newKey("name1", nil) + testKey2a = newKey("name2", testKey0) + testKey2b = newKey("name2", testKey0) +) + +type B0 struct { + B []byte `datastore:",noindex"` +} + +type B1 struct { + B []int8 +} + +type B2 struct { + B myBlob `datastore:",noindex"` +} + +type B3 struct { + B []myByte `datastore:",noindex"` +} + +type B4 struct { + B [][]byte +} + +type C0 struct { + I int + C chan int +} + +type C1 struct { + I int + C *chan int +} + +type C2 struct { + I int + C []chan int +} + +type C3 struct { + C string +} + +type E struct{} + +type K0 struct { + K *Key +} + +type K1 struct { + K []*Key +} + +type N0 struct { + X0 + Nonymous X0 + Ignore string `datastore:"-"` + Other string +} + +type N1 struct { + X0 + Nonymous []X0 + Ignore string `datastore:"-"` + Other string +} + +type N2 struct { + N1 `datastore:"red"` + Green N1 `datastore:"green"` + Blue N1 + White N1 `datastore:"-"` +} + +type O0 struct { + I int64 +} + +type O1 struct { + I int32 +} + +type U0 struct { + U uint +} + +type U1 struct { + U string +} + +type T struct { + T time.Time +} + +type X0 struct { + S string + I int + i int +} + +type X1 struct { + S myString + I int32 + J int64 +} + +type X2 struct { + Z string + i int +} + +type X3 struct { + S bool + I int +} + +type Y0 struct { + B bool + F []float64 + G []float64 +} + +type Y1 struct { + B bool + F float64 +} + +type Y2 struct { + B bool + F []int64 +} + +type Tagged struct { + A int `datastore:"a,noindex"` + B []int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + + Y0 `datastore:"-"` + Z chan int `datastore:"-,"` +} + +type InvalidTagged1 struct { + I int `datastore:"\t"` +} + +type InvalidTagged2 struct { + I int + J int `datastore:"I"` +} + +type Inner1 struct { + W int32 + X string +} + +type Inner2 struct { + Y float64 +} + +type Inner3 struct { + Z bool +} + +type Outer struct { + A int16 + I []Inner1 + J Inner2 + Inner3 +} + +type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + Z bool +} + +type Dotted struct { + A DottedA `datastore:"A0.A1.A2"` +} + +type DottedA struct { + B DottedB `datastore:"B3"` +} + +type DottedB struct { + C int `datastore:"C4.C5"` +} + +type SliceOfSlices struct { + I int + S 
[]struct { + J int + F []float64 + } +} + +type Recursive struct { + I int + R []Recursive +} + +type MutuallyRecursive0 struct { + I int + R []MutuallyRecursive1 +} + +type MutuallyRecursive1 struct { + I int + R []MutuallyRecursive0 +} + +type Doubler struct { + S string + I int64 + B bool +} + +func (d *Doubler) Load(props []Property) error { + return LoadStruct(d, props) +} + +func (d *Doubler) Save() ([]Property, error) { + // Save the default Property slice to an in-memory buffer (a PropertyList). + props, err := SaveStruct(d) + if err != nil { + return nil, err + } + var list PropertyList + if err := list.Load(props); err != nil { + return nil, err + } + + // Edit that PropertyList, and send it on. + for i := range list { + switch v := list[i].Value.(type) { + case string: + // + means string concatenation. + list[i].Value = v + v + case int64: + // + means integer addition. + list[i].Value = v + v + } + } + return list.Save() +} + +var _ PropertyLoadSaver = (*Doubler)(nil) + +type Deriver struct { + S, Derived, Ignored string +} + +func (e *Deriver) Load(props []Property) error { + for _, p := range props { + if p.Name != "S" { + continue + } + e.S = p.Value.(string) + e.Derived = "derived+" + e.S + } + return nil +} + +func (e *Deriver) Save() ([]Property, error) { + return []Property{ + { + Name: "S", + Value: e.S, + }, + }, nil +} + +var _ PropertyLoadSaver = (*Deriver)(nil) + +type BadMultiPropEntity struct{} + +func (e *BadMultiPropEntity) Load(props []Property) error { + return errors.New("unimplemented") +} + +func (e *BadMultiPropEntity) Save() ([]Property, error) { + // Write multiple properties with the same name "I", but Multiple is false. + var props []Property + for i := 0; i < 3; i++ { + props = append(props, Property{ + Name: "I", + Value: int64(i), + }) + } + return props, nil +} + +var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) + +type testCase struct { + desc string + src interface{} + want interface{} + putErr string + getErr string +} + +var testCases = []testCase{ + { + "chan save fails", + &C0{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "*chan save fails", + &C1{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "[]chan save fails", + &C2{I: -1, C: make([]chan int, 8)}, + &E{}, + "unsupported struct field", + "", + }, + { + "chan load fails", + &C3{C: "not a chan"}, + &C0{}, + "", + "type mismatch", + }, + { + "*chan load fails", + &C3{C: "not a *chan"}, + &C1{}, + "", + "type mismatch", + }, + { + "[]chan load fails", + &C3{C: "not a []chan"}, + &C2{}, + "", + "type mismatch", + }, + { + "empty struct", + &E{}, + &E{}, + "", + "", + }, + { + "key", + &K0{K: testKey1a}, + &K0{K: testKey1b}, + "", + "", + }, + { + "key with parent", + &K0{K: testKey2a}, + &K0{K: testKey2b}, + "", + "", + }, + { + "nil key", + &K0{}, + &K0{}, + "", + "", + }, + { + "all nil keys in slice", + &K1{[]*Key{nil, nil}}, + &K1{[]*Key{nil, nil}}, + "", + "", + }, + { + "some nil keys in slice", + &K1{[]*Key{testKey1a, nil, testKey2a}}, + &K1{[]*Key{testKey1b, nil, testKey2b}}, + "", + "", + }, + { + "overflow", + &O0{I: 1 << 48}, + &O1{}, + "", + "overflow", + }, + { + "time", + &T{T: time.Unix(1e9, 0)}, + &T{T: time.Unix(1e9, 0)}, + "", + "", + }, + { + "time as props", + &T{T: time.Unix(1e9, 0)}, + &PropertyList{ + Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false}, + }, + "", + "", + }, + { + "uint save", + &U0{U: 1}, + &U0{}, + "unsupported struct field", + "", + }, + { + "uint load", + &U1{U: "not a uint"}, + &U0{}, + "", + "type 
mismatch", + }, + { + "zero", + &X0{}, + &X0{}, + "", + "", + }, + { + "basic", + &X0{S: "one", I: 2, i: 3}, + &X0{S: "one", I: 2}, + "", + "", + }, + { + "save string/int load myString/int32", + &X0{S: "one", I: 2, i: 3}, + &X1{S: "one", I: 2}, + "", + "", + }, + { + "missing fields", + &X0{S: "one", I: 2, i: 3}, + &X2{}, + "", + "no such struct field", + }, + { + "save string load bool", + &X0{S: "one", I: 2, i: 3}, + &X3{I: 2}, + "", + "type mismatch", + }, + { + "basic slice", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y0{B: true, F: []float64{7, 8, 9}}, + "", + "", + }, + { + "save []float64 load float64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y1{B: true}, + "", + "requires a slice", + }, + { + "save []float64 load []int64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y2{B: true}, + "", + "type mismatch", + }, + { + "single slice is too long", + &Y0{F: make([]float64, maxIndexedProperties+1)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "two slices are too long", + &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "one slice and one scalar are too long", + &Y0{F: make([]float64, maxIndexedProperties), B: true}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "long blob", + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "long []int8 is too long", + &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, + &B1{}, + "too many indexed properties", + "", + }, + { + "short []int8", + &B1{B: makeInt8Slice(3)}, + &B1{B: makeInt8Slice(3)}, + "", + "", + }, + { + "long myBlob", + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short myBlob", + &B2{B: makeUint8Slice(3)}, + &B2{B: makeUint8Slice(3)}, + "", + "", + }, + { + "long []myByte", + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short []myByte", + &B3{B: makeMyByteSlice(3)}, + &B3{B: makeMyByteSlice(3)}, + "", + "", + }, + { + "slice of blobs", + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + "", + "", + }, + { + "[]byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: makeUint8Slice(1501), NoIndex: false}, + }, + nil, + "cannot index a Property", + "", + }, + { + "save tagged load props", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &PropertyList{ + // A and B are renamed to a and b; A and C are noindex, I is ignored. 
+ // Order is alphabetical + Property{Name: "a", Value: int64(1), NoIndex: true}, + Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true}, + Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true}, + Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true}, + Property{Name: "C", Value: int64(3), NoIndex: true}, + Property{Name: "D", Value: int64(4), NoIndex: false}, + Property{Name: "E", Value: int64(5), NoIndex: false}, + Property{Name: "J", Value: int64(7), NoIndex: true}, + }, + "", + "", + }, + { + "save tagged load tagged", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, + "", + "", + }, + { + "save props load tagged", + &PropertyList{ + Property{Name: "A", Value: int64(11), NoIndex: true}, + Property{Name: "a", Value: int64(12), NoIndex: true}, + }, + &Tagged{A: 12}, + "", + `cannot load field "A"`, + }, + { + "invalid tagged1", + &InvalidTagged1{I: 1}, + &InvalidTagged1{}, + "struct tag has invalid property name", + "", + }, + { + "invalid tagged2", + &InvalidTagged2{I: 1, J: 2}, + &InvalidTagged2{}, + "struct tag has repeated property name", + "", + }, + { + "doubler", + &Doubler{S: "s", I: 1, B: true}, + &Doubler{S: "ss", I: 2, B: true}, + "", + "", + }, + { + "save struct load props", + &X0{S: "s", I: 1}, + &PropertyList{ + Property{Name: "S", Value: "s", NoIndex: false}, + Property{Name: "I", Value: int64(1), NoIndex: false}, + }, + "", + "", + }, + { + "save props load struct", + &PropertyList{ + Property{Name: "S", Value: "s", NoIndex: false}, + Property{Name: "I", Value: int64(1), NoIndex: false}, + }, + &X0{S: "s", I: 1}, + "", + "", + }, + { + "nil-value props", + &PropertyList{ + Property{Name: "I", Value: nil, NoIndex: false}, + Property{Name: "B", Value: nil, NoIndex: false}, + Property{Name: "S", Value: nil, NoIndex: false}, + Property{Name: "F", Value: nil, NoIndex: false}, + Property{Name: "K", Value: nil, NoIndex: false}, + Property{Name: "T", Value: nil, NoIndex: false}, + Property{Name: "J", Value: nil, NoIndex: false}, + Property{Name: "J", Value: int64(7), NoIndex: false}, + Property{Name: "J", Value: nil, NoIndex: false}, + }, + &struct { + I int64 + B bool + S string + F float64 + K *Key + T time.Time + J []int64 + }{ + J: []int64{0, 7, 0}, + }, + "", + "", + }, + { + "save outer load props", + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "save props load outer-equivalent", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: int64(10), NoIndex: false}, + Property{Name: "I.X", Value: "ten", NoIndex: false}, + Property{Name: "I.W", Value: int64(20), NoIndex: false}, + Property{Name: "I.X", Value: "twenty", NoIndex: false}, + 
Property{Name: "I.W", Value: int64(30), NoIndex: false}, + Property{Name: "I.X", Value: "thirty", NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + "", + "", + }, + { + "save outer-equivalent load outer", + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + "", + "", + }, + { + "dotted names save", + &Dotted{A: DottedA{B: DottedB{C: 88}}}, + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false}, + }, + "", + "", + }, + { + "dotted names load", + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false}, + }, + &Dotted{A: DottedA{B: DottedB{C: 99}}}, + "", + "", + }, + { + "save struct load deriver", + &X0{S: "s", I: 1}, + &Deriver{S: "s", Derived: "derived+s"}, + "", + "", + }, + { + "save deriver load struct", + &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, + &X0{S: "s"}, + "", + "", + }, + { + "zero time.Time", + &T{T: time.Time{}}, + &T{T: time.Time{}}, + "", + "", + }, + { + "time.Time near Unix zero time", + &T{T: time.Unix(0, 4e3)}, + &T{T: time.Unix(0, 4e3)}, + "", + "", + }, + { + "time.Time, far in the future", + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + "", + "", + }, + { + "time.Time, very far in the past", + &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "time.Time, very far in the future", + &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "structs", + &N0{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: X0{S: "four", I: 5, i: 6}, + Ignore: "ignore", + Other: "other", + }, + &N0{ + X0: X0{S: "one", I: 2}, + Nonymous: X0{S: "four", I: 5}, + Other: "other", + }, + "", + "", + }, + { + "slice of structs", + &N1{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: []X0{ + {S: "four", I: 5, i: 6}, + {S: "seven", I: 8, i: 9}, + {S: "ten", I: 11, i: 12}, + {S: "thirteen", I: 14, i: 15}, + }, + Ignore: "ignore", + Other: "other", + }, + &N1{ + X0: X0{S: "one", I: 2}, + Nonymous: []X0{ + {S: "four", I: 5}, + {S: "seven", I: 8}, + {S: "ten", I: 11}, + {S: "thirteen", I: 14}, + }, + Other: "other", + }, + "", + "", + }, + { + "structs with slices of structs", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + "", + "", + }, + { + "save structs load props", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: 
N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &PropertyList{ + Property{Name: "red.S", Value: "rouge", NoIndex: false}, + Property{Name: "red.I", Value: int64(0), NoIndex: false}, + Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "red.Other", Value: "", NoIndex: false}, + Property{Name: "green.S", Value: "vert", NoIndex: false}, + Property{Name: "green.I", Value: int64(0), NoIndex: false}, + Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Other", Value: "", NoIndex: false}, + Property{Name: "Blue.S", Value: "bleu", NoIndex: false}, + Property{Name: "Blue.I", Value: int64(0), NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Other", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "save props load structs with ragged fields", + &PropertyList{ + Property{Name: "red.S", Value: "rot", NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false}, + Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false}, + Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false}, + }, + &N2{ + N1: N1{ + X0: X0{S: "rot"}, + }, + Green: N1{ + Nonymous: []X0{ + {I: 10}, + {I: 11}, + {I: 12}, + {I: 13}, + }, + }, + Blue: N1{ + Nonymous: []X0{ + {S: "blau0", I: 20}, + {S: "blau1", I: 21}, + {S: "blau2"}, + }, + }, + }, + "", + "", + }, + { + "save structs with noindex tags", + &struct { + A struct { + X string `datastore:",noindex"` + Y string + } `datastore:",noindex"` + B struct { + X string `datastore:",noindex"` + 
Y string + } + }{}, + &PropertyList{ + Property{Name: "A.X", Value: "", NoIndex: true}, + Property{Name: "A.Y", Value: "", NoIndex: true}, + Property{Name: "B.X", Value: "", NoIndex: true}, + Property{Name: "B.Y", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "embedded struct with name override", + &struct { + Inner1 `datastore:"foo"` + }{}, + &PropertyList{ + Property{Name: "foo.W", Value: int64(0), NoIndex: false}, + Property{Name: "foo.X", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "slice of slices", + &SliceOfSlices{}, + nil, + "flattening nested structs leads to a slice of slices", + "", + }, + { + "recursive struct", + &Recursive{}, + nil, + "recursive struct", + "", + }, + { + "mutually recursive struct", + &MutuallyRecursive0{}, + nil, + "recursive struct", + "", + }, + { + "non-exported struct fields", + &struct { + i, J int64 + }{i: 1, J: 2}, + &PropertyList{ + Property{Name: "J", Value: int64(2), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage", + &struct { + J json.RawMessage + }{ + J: json.RawMessage("rawr"), + }, + &PropertyList{ + Property{Name: "J", Value: []byte("rawr"), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage to myBlob", + &struct { + B json.RawMessage + }{ + B: json.RawMessage("rawr"), + }, + &B2{B: myBlob("rawr")}, + "", + "", + }, +} + +// checkErr returns the empty string if either both want and err are zero, +// or if want is a non-empty substring of err's string representation. +func checkErr(want string, err error) string { + if err != nil { + got := err.Error() + if want == "" || strings.Index(got, want) == -1 { + return got + } + } else if want != "" { + return fmt.Sprintf("want error %q", want) + } + return "" +} + +func TestRoundTrip(t *testing.T) { + for _, tc := range testCases { + p, err := saveEntity(testKey0, tc.src) + if s := checkErr(tc.putErr, err); s != "" { + t.Errorf("%s: save: %s", tc.desc, s) + continue + } + if p == nil { + continue + } + var got interface{} + if _, ok := tc.want.(*PropertyList); ok { + got = new(PropertyList) + } else { + got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + } + err = loadEntity(got, p) + if s := checkErr(tc.getErr, err); s != "" { + t.Errorf("%s: load: %s", tc.desc, s) + continue + } + equal := false + if gotT, ok := got.(*T); ok { + // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. + // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. + equal = gotT.T.Equal(tc.want.(*T).T) + } else { + equal = reflect.DeepEqual(got, tc.want) + } + if !equal { + t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want) + continue + } + } +} + +func TestQueryConstruction(t *testing.T) { + tests := []struct { + q, exp *Query + err string + }{ + { + q: NewQuery("Foo"), + exp: &Query{ + kind: "Foo", + limit: -1, + }, + }, + { + // Regular filtered query with standard spacing. + q: NewQuery("Foo").Filter("foo >", 7), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterThan, + Value: 7, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with no spacing. + q: NewQuery("Foo").Filter("foo=", 6), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: equal, + Value: 6, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with funky spacing. 
+ q: NewQuery("Foo").Filter(" foo< ", 8), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: lessThan, + Value: 8, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with multicharacter op. + q: NewQuery("Foo").Filter("foo >=", 9), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterEq, + Value: 9, + }, + }, + limit: -1, + }, + }, + { + // Query with ordering. + q: NewQuery("Foo").Order("bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: ascending, + }, + }, + limit: -1, + }, + }, + { + // Query with reverse ordering, and funky spacing. + q: NewQuery("Foo").Order(" - bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: descending, + }, + }, + limit: -1, + }, + }, + { + // Query with an empty ordering. + q: NewQuery("Foo").Order(""), + err: "empty order", + }, + { + // Query with a + ordering. + q: NewQuery("Foo").Order("+bar"), + err: "invalid order", + }, + } + for i, test := range tests { + if test.q.err != nil { + got := test.q.err.Error() + if !strings.Contains(got, test.err) { + t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) + } + continue + } + if !reflect.DeepEqual(test.q, test.exp) { + t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/errors.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/errors.go new file mode 100644 index 000000000..3077f80d3 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/errors.go @@ -0,0 +1,47 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file provides error functions for common API failure modes. + +package datastore + +import ( + "fmt" +) + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/example_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/example_test.go new file mode 100644 index 000000000..391991c95 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/example_test.go @@ -0,0 +1,251 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore_test + +import ( + "io/ioutil" + "log" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/cloud" + "google.golang.org/cloud/datastore" +) + +// TODO(djd): reevaluate this example given new Client config. +func Example_auth() *datastore.Client { + // Initialize an authorized context with Google Developers Console + // JSON key. Read the google package examples to learn more about + // different authorization flows you can use. + // http://godoc.org/golang.org/x/oauth2/google + jsonKey, err := ioutil.ReadFile("/path/to/json/keyfile.json") + if err != nil { + log.Fatal(err) + } + conf, err := google.JWTConfigFromJSON( + jsonKey, + datastore.ScopeDatastore, + datastore.ScopeUserEmail, + ) + if err != nil { + log.Fatal(err) + } + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id", cloud.WithTokenSource(conf.TokenSource(ctx))) + if err != nil { + log.Fatal(err) + } + // Use the client (see other examples). + return client +} + +func ExampleGet() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + key := datastore.NewKey(ctx, "Article", "articled1", 0, nil) + article := &Article{} + if err := client.Get(ctx, key, article); err != nil { + log.Fatal(err) + } +} + +func ExamplePut() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + newKey := datastore.NewIncompleteKey(ctx, "Article", nil) + _, err = client.Put(ctx, newKey, &Article{ + Title: "The title of the article", + Description: "The description of the article...", + Body: "...", + Author: datastore.NewKey(ctx, "Author", "jbd", 0, nil), + PublishedAt: time.Now(), + }) + if err != nil { + log.Fatal(err) + } +} + +func ExampleDelete() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + key := datastore.NewKey(ctx, "Article", "articled1", 0, nil) + if err := client.Delete(ctx, key); err != nil { + log.Fatal(err) + } +} + +type Post struct { + Title string + PublishedAt time.Time + Comments int +} + +func ExampleGetMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), + datastore.NewKey(ctx, "Post", "post3", 0, nil), + } + posts := make([]Post, 3) + if err := client.GetMulti(ctx, keys, posts); err != nil { + log.Println(err) + } +} + +func ExamplePutMulti_slice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + keys := 
[]*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), + } + + // PutMulti with a Post slice. + posts := []*Post{ + {Title: "Post 1", PublishedAt: time.Now()}, + {Title: "Post 2", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) + } +} + +func ExamplePutMulti_interfaceSlice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), + } + + // PutMulti with an empty interface slice. + posts := []interface{}{ + &Post{Title: "Post 1", PublishedAt: time.Now()}, + &Post{Title: "Post 2", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) + } +} + +func ExampleQuery() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + // Count the number of the post entities. + q := datastore.NewQuery("Post") + n, err := client.Count(ctx, q) + if err != nil { + log.Fatal(err) + } + log.Printf("There are %d posts.", n) + + // List the posts published since yesterday. + yesterday := time.Now().Add(-24 * time.Hour) + q = datastore.NewQuery("Post").Filter("PublishedAt >", yesterday) + it := client.Run(ctx, q) + // Use the iterator. + _ = it + + // Order the posts by the number of comments they have received. + datastore.NewQuery("Post").Order("-Comments") + + // Start listing from an offset and limit the results. + datastore.NewQuery("Post").Offset(20).Limit(10) +} + +func ExampleTransaction() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + const retries = 3 + + // Increment a counter. + // See https://cloud.google.com/appengine/articles/sharding_counters for + // a more scalable solution. + type Counter struct { + Count int + } + + key := datastore.NewKey(ctx, "counter", "CounterA", 0, nil) + + for i := 0; i < retries; i++ { + tx, err := client.NewTransaction(ctx) + if err != nil { + break + } + + var c Counter + if err := tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity { + break + } + c.Count++ + if _, err := tx.Put(key, &c); err != nil { + break + } + + // Attempt to commit the transaction. If there's a conflict, try again. + if _, err := tx.Commit(); err != datastore.ErrConcurrentTransaction { + break + } + } + +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/integration_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/integration_test.go new file mode 100644 index 000000000..01d52ab65 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/integration_test.go @@ -0,0 +1,569 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
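+ +// The tests in this file talk to a live Cloud Datastore backend and are +// guarded by the "integration" build constraint below, so they are typically +// invoked with something like "go test -tags integration"; the project and +// credentials come from the testutil helpers used in newClient.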
+ +// +build integration + +package datastore + +import ( + "fmt" + "log" + + "reflect" + "sort" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/cloud" + "google.golang.org/cloud/internal/testutil" +) + +func newClient(ctx context.Context) *Client { + ts := testutil.TokenSource(ctx, ScopeDatastore, ScopeUserEmail) + client, err := NewClient(ctx, testutil.ProjID(), cloud.WithTokenSource(ts)) + if err != nil { + log.Fatal(err) + } + return client +} + +func TestBasics(t *testing.T) { + type X struct { + I int + S string + T time.Time + } + ctx := context.Background() + client := newClient(ctx) + x0 := X{66, "99", time.Now().Truncate(time.Millisecond)} + k, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsX", nil), &x0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + x1 := X{} + err = client.Get(ctx, k, &x1) + if err != nil { + t.Errorf("client.Get: %v", err) + } + err = client.Delete(ctx, k) + if err != nil { + t.Errorf("client.Delete: %v", err) + } + if !reflect.DeepEqual(x0, x1) { + t.Errorf("compare: x0=%v, x1=%v", x0, x1) + } +} + +func TestListValues(t *testing.T) { + p0 := PropertyList{ + {Name: "L", Value: int64(12), Multiple: true}, + {Name: "L", Value: "string", Multiple: true}, + {Name: "L", Value: true, Multiple: true}, + } + ctx := context.Background() + client := newClient(ctx) + k, err := client.Put(ctx, NewIncompleteKey(ctx, "ListValue", nil), &p0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + var p1 PropertyList + if err := client.Get(ctx, k, &p1); err != nil { + t.Errorf("client.Get: %v", err) + } + if !reflect.DeepEqual(p0, p1) { + t.Errorf("compare:\np0=%v\np1=%#v", p0, p1) + } + if err = client.Delete(ctx, k); err != nil { + t.Errorf("client.Delete: %v", err) + } +} + +func TestGetMulti(t *testing.T) { + type X struct { + I int + } + ctx := context.Background() + client := newClient(ctx) + p := NewKey(ctx, "X", "", time.Now().Unix(), nil) + + cases := []struct { + key *Key + put bool + }{ + {key: NewKey(ctx, "X", "item1", 0, p), put: true}, + {key: NewKey(ctx, "X", "item2", 0, p), put: false}, + {key: NewKey(ctx, "X", "item3", 0, p), put: false}, + {key: NewKey(ctx, "X", "item4", 0, p), put: true}, + } + + var src, dst []*X + var srcKeys, dstKeys []*Key + for _, c := range cases { + dst = append(dst, &X{}) + dstKeys = append(dstKeys, c.key) + if c.put { + src = append(src, &X{}) + srcKeys = append(srcKeys, c.key) + } + } + if _, err := client.PutMulti(ctx, srcKeys, src); err != nil { + t.Error(err) + } + err := client.GetMulti(ctx, dstKeys, dst) + if err == nil { + t.Errorf("client.GetMulti got %v, expected error", err) + } + e, ok := err.(MultiError) + if !ok { + t.Errorf("client.GetMulti got %T, expected MultiError", err) + } + for i, err := range e { + got, want := err, (error)(nil) + if !cases[i].put { + got, want = err, ErrNoSuchEntity + } + if got != want { + t.Errorf("MultiError[%d] == %v, want %v", i, got, want) + } + } +} + +type Z struct { + S string + T string `datastore:",noindex"` + P []byte + K []byte `datastore:",noindex"` +} + +func (z Z) String() string { + var lens []string + v := reflect.ValueOf(z) + for i := 0; i < v.NumField(); i++ { + if l := v.Field(i).Len(); l > 0 { + lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l)) + } + } + return fmt.Sprintf("Z{ %s }", strings.Join(lens, ",")) +} + +func TestUnindexableValues(t *testing.T) { + x1500 := strings.Repeat("x", 1500) + x1501 := strings.Repeat("x", 1501) + testCases := []struct { + in Z + wantErr bool + }{ + {in:
Z{S: x1500}, wantErr: false}, + {in: Z{S: x1501}, wantErr: true}, + {in: Z{T: x1500}, wantErr: false}, + {in: Z{T: x1501}, wantErr: false}, + {in: Z{P: []byte(x1500)}, wantErr: false}, + {in: Z{P: []byte(x1501)}, wantErr: true}, + {in: Z{K: []byte(x1500)}, wantErr: false}, + {in: Z{K: []byte(x1501)}, wantErr: false}, + } + ctx := context.Background() + client := newClient(ctx) + for _, tt := range testCases { + _, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsZ", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +type SQChild struct { + I, J int + T, U int64 +} + +type SQTestCase struct { + desc string + q *Query + wantCount int + wantSum int +} + +func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild, + testCases []SQTestCase, extraTests ...func()) { + keys := make([]*Key, len(children)) + for i := range keys { + keys[i] = NewIncompleteKey(ctx, "SQChild", parent) + } + keys, err := client.PutMulti(ctx, keys, children) + if err != nil { + t.Fatalf("client.PutMulti: %v", err) + } + defer func() { + err := client.DeleteMulti(ctx, keys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + + for _, tc := range testCases { + count, err := client.Count(ctx, tc.q) + if err != nil { + t.Errorf("Count %q: %v", tc.desc, err) + continue + } + if count != tc.wantCount { + t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount) + continue + } + } + + for _, tc := range testCases { + var got []SQChild + _, err := client.GetAll(ctx, tc.q, &got) + if err != nil { + t.Errorf("client.GetAll %q: %v", tc.desc, err) + continue + } + sum := 0 + for _, c := range got { + sum += c.I + c.J + } + if sum != tc.wantSum { + t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum) + continue + } + } + for _, x := range extraTests { + x() + } +} + +func TestFilters(t *testing.T) { + ctx := context.Background() + client := newClient(ctx) + parent := NewKey(ctx, "SQParent", "TestFilters", 0, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now) + testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ + { + "I>1", + baseQuery.Filter("I>", 1), + 6, + 2 + 3 + 4 + 5 + 6 + 7, + }, + { + "I>2 AND I<=5", + baseQuery.Filter("I>", 2).Filter("I<=", 5), + 3, + 3 + 4 + 5, + }, + { + "I>=3 AND I<3", + baseQuery.Filter("I>=", 3).Filter("I<", 3), + 0, + 0, + }, + { + "I=4", + baseQuery.Filter("I=", 4), + 1, + 4, + }, + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 7, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 0, T: now, U: 
now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("-I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }) +} + +func TestEventualConsistency(t *testing.T) { + ctx := context.Background() + client := newClient(ctx) + parent := NewKey(ctx, "SQParent", "TestEventualConsistency", 0, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + } + query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency() + testSmallQueries(t, ctx, client, parent, children, nil, func() { + got, err := client.Count(ctx, query) + if err != nil { + t.Fatalf("Count: %v", err) + } + if got < 0 || 3 < got { + t.Errorf("Count: got %d, want [0,3]", got) + } + }) +} + +func TestProjection(t *testing.T) { + ctx := context.Background() + client := newClient(ctx) + parent := NewKey(ctx, "SQParent", "TestProjection", 0, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 1 << 0, J: 100, T: now, U: now}, + {I: 1 << 1, J: 100, T: now, U: now}, + {I: 1 << 2, J: 200, T: now, U: now}, + {I: 1 << 3, J: 300, T: now, U: now}, + {I: 1 << 4, J: 300, T: now, U: now}, + } + baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150) + testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ + { + "project", + baseQuery.Project("J"), + 3, + 200 + 300 + 300, + }, + { + "distinct", + baseQuery.Project("J").Distinct(), + 2, + 200 + 300, + }, + { + "project on meaningful (GD_WHEN) field", + baseQuery.Project("U"), + 3, + 0, + }, + }) +} + +func TestAllocateIDs(t *testing.T) { + ctx := context.Background() + client := newClient(ctx) + keys := make([]*Key, 5) + for i := range keys { + keys[i] = NewIncompleteKey(ctx, "AllocID", nil) + } + keys, err := client.AllocateIDs(ctx, keys) + if err != nil { + t.Errorf("AllocID #0 failed: %v", err) + } + if got := len(keys); got != 5 { + t.Errorf("Expected to allocate 5 keys, got %d", got) + } + for _, k := range keys { + if k.Incomplete() { + t.Errorf("Unexpected incomplete key found: %v", k) + } + } +} + +func TestGetAllWithFieldMismatch(t *testing.T) { + type Fat struct { + X, Y int + } + type Thin struct { + X int + } + + ctx := context.Background() + client := newClient(ctx) + // Ancestor queries (those within an entity group) are strongly consistent + // by default, which prevents a test from being flaky. + // See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency + // for more information.
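+ // The entities below are saved with two fields (Fat) but loaded into a + // type with only one (Thin): GetAll should still fill the matching X + // field for each entity while reporting the unmatched Y field via + // ErrFieldMismatch.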
+ parent := NewKey(ctx, "SQParent", "TestGetAllWithFieldMismatch", 0, nil) + putKeys := make([]*Key, 3) + for i := range putKeys { + putKeys[i] = NewKey(ctx, "GetAllThing", "", int64(10+i), parent) + _, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i}) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + } + + var got []Thin + want := []Thin{ + {X: 20}, + {X: 21}, + {X: 22}, + } + getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) + if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) { + t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) + } + if _, ok := err.(*ErrFieldMismatch); !ok { + t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err) + } +} + +func TestKindlessQueries(t *testing.T) { + type Dee struct { + I int + Why string + } + type Dum struct { + I int + Pling string + } + + ctx := context.Background() + client := newClient(ctx) + parent := NewKey(ctx, "Tweedle", "tweedle", 0, nil) + + keys := []*Key{ + NewKey(ctx, "Dee", "dee0", 0, parent), + NewKey(ctx, "Dum", "dum1", 0, parent), + NewKey(ctx, "Dum", "dum2", 0, parent), + NewKey(ctx, "Dum", "dum3", 0, parent), + } + src := []interface{}{ + &Dee{1, "binary0001"}, + &Dum{2, "binary0010"}, + &Dum{4, "binary0100"}, + &Dum{8, "binary1000"}, + } + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("put: %v", err) + } + + testCases := []struct { + desc string + query *Query + want []int + wantErr string + }{ + { + desc: "Dee", + query: NewQuery("Dee"), + want: []int{1}, + }, + { + desc: "Doh", + query: NewQuery("Doh"), + want: nil}, + { + desc: "Dum", + query: NewQuery("Dum"), + want: []int{2, 4, 8}, + }, + { + desc: "", + query: NewQuery(""), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless filter", + query: NewQuery("").Filter("__key__ =", keys[2]), + want: []int{4}, + }, + { + desc: "Kindless order", + query: NewQuery("").Order("__key__"), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless bad filter", + query: NewQuery("").Filter("I =", 4), + wantErr: "kind is required for filter: I", + }, + { + desc: "Kindless bad order", + query: NewQuery("").Order("-__key__"), + wantErr: "kind is required for all orders except __key__ ascending", + }, + } +loop: + for _, tc := range testCases { + q := tc.query.Ancestor(parent) + gotCount, err := client.Count(ctx, q) + if err != nil { + if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr) + } + continue + } + if tc.wantErr != "" { + t.Errorf("count %q: want err %q", tc.desc, tc.wantErr) + continue + } + if gotCount != len(tc.want) { + t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want)) + continue + } + var got []int + for iter := client.Run(ctx, q); ; { + var dst struct { + I int + Why, Pling string + } + _, err := iter.Next(&dst) + if err == Done { + break + } + if err != nil { + t.Errorf("iter.Next %q: %v", tc.desc, err) + continue loop + } + got = append(got, dst.I) + } + sort.Ints(got) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/key.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/key.go new file mode 100644 index 000000000..d0c632b5c --- /dev/null +++ 
b/Godeps/_workspace/src/google.golang.org/cloud/datastore/key.go @@ -0,0 +1,277 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "bytes" + "encoding/base64" + "encoding/gob" + "errors" + "strconv" + "strings" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/cloud/internal/datastore" +) + +// Key represents the datastore key for a stored entity, and is immutable. +type Key struct { + kind string + id int64 + name string + parent *Key + + namespace string +} + +func (k *Key) Kind() string { + return k.kind +} + +func (k *Key) ID() int64 { + return k.id +} + +func (k *Key) Name() string { + return k.name +} + +func (k *Key) Parent() *Key { + return k.parent +} + +func (k *Key) SetParent(v *Key) { + if v.Incomplete() { + panic("can't set an incomplete key as parent") + } + k.parent = v +} + +func (k *Key) Namespace() string { + return k.namespace +} + +// Incomplete reports whether the key does not refer to a stored entity. +func (k *Key) Incomplete() bool { + return k.name == "" && k.id == 0 +} + +// valid returns whether the key is valid. +func (k *Key) valid() bool { + if k == nil { + return false + } + for ; k != nil; k = k.parent { + if k.kind == "" { + return false + } + if k.name != "" && k.id != 0 { + return false + } + if k.parent != nil { + if k.parent.Incomplete() { + return false + } + if k.parent.namespace != k.namespace { + return false + } + } + } + return true +} + +func (k *Key) Equal(o *Key) bool { + for { + if k == nil || o == nil { + return k == o // if either is nil, both must be nil + } + if k.namespace != o.namespace || k.name != o.name || k.id != o.id || k.kind != o.kind { + return false + } + if k.parent == nil && o.parent == nil { + return true + } + k = k.parent + o = o.parent + } +} + +// marshal marshals the key's string representation to the buffer. +func (k *Key) marshal(b *bytes.Buffer) { + if k.parent != nil { + k.parent.marshal(b) + } + b.WriteByte('/') + b.WriteString(k.kind) + b.WriteByte(',') + if k.name != "" { + b.WriteString(k.name) + } else { + b.WriteString(strconv.FormatInt(k.id, 10)) + } +} + +// String returns a string representation of the key.
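+// Each ancestor segment is rendered as "/Kind,name" or "/Kind,id", parents +// first; for example (illustrative, following marshal above), a key built as +// NewKey(ctx, "Child", "", 42, NewKey(ctx, "Parent", "p", 0, nil)) renders +// as "/Parent,p/Child,42".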
+func (k *Key) String() string { + if k == nil { + return "" + } + b := bytes.NewBuffer(make([]byte, 0, 512)) + k.marshal(b) + return b.String() +} + +// Note: fields are not renamed compared to the appengine gobKey struct. +// This ensures gobs created by appengine can be read here, and vice versa. +type gobKey struct { + Kind string + StringID string + IntID int64 + Parent *gobKey + AppID string + Namespace string +} + +func keyToGobKey(k *Key) *gobKey { + if k == nil { + return nil + } + return &gobKey{ + Kind: k.kind, + StringID: k.name, + IntID: k.id, + Parent: keyToGobKey(k.parent), + Namespace: k.namespace, + } +} + +func gobKeyToKey(gk *gobKey) *Key { + if gk == nil { + return nil + } + return &Key{ + kind: gk.Kind, + name: gk.StringID, + id: gk.IntID, + parent: gobKeyToKey(gk.Parent), + namespace: gk.Namespace, + } +} + +func (k *Key) GobEncode() ([]byte, error) { + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (k *Key) GobDecode(buf []byte) error { + gk := new(gobKey) + if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { + return err + } + *k = *gobKeyToKey(gk) + return nil +} + +func (k *Key) MarshalJSON() ([]byte, error) { + return []byte(`"` + k.Encode() + `"`), nil +} + +func (k *Key) UnmarshalJSON(buf []byte) error { + if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { + return errors.New("datastore: bad JSON key") + } + k2, err := DecodeKey(string(buf[1 : len(buf)-1])) + if err != nil { + return err + } + *k = *k2 + return nil +} + +// Encode returns an opaque representation of the key +// suitable for use in HTML and URLs. +// This is compatible with the Python and Java runtimes. +func (k *Key) Encode() string { + pKey := keyToProto(k) + + b, err := proto.Marshal(pKey) + if err != nil { + panic(err) + } + + // Trailing padding is stripped. + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// DecodeKey decodes a key from the opaque representation returned by Encode. +func DecodeKey(encoded string) (*Key, error) { + // Re-add padding. + if m := len(encoded) % 4; m != 0 { + encoded += strings.Repeat("=", 4-m) + } + + b, err := base64.URLEncoding.DecodeString(encoded) + if err != nil { + return nil, err + } + + pKey := new(pb.Key) + if err := proto.Unmarshal(b, pKey); err != nil { + return nil, err + } + + return protoToKey(pKey), nil +} + +// NewIncompleteKey creates a new incomplete key. +// kind cannot be empty. +func NewIncompleteKey(ctx context.Context, kind string, parent *Key) *Key { + return NewKey(ctx, kind, "", 0, parent) +} + +// NewKey creates a new key. +// kind cannot be empty. +// Either one or both of name and id must be zero. If both are zero, +// the key returned is incomplete. +// parent must either be a complete key or nil.
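+// +// For example (illustrative): +// +// named := NewKey(ctx, "Post", "post1", 0, nil) // complete, keyed by name +// numeric := NewKey(ctx, "Post", "", 1234, nil) // complete, keyed by numeric id +// pending := NewKey(ctx, "Post", "", 0, nil) // incomplete; an id is assigned on save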
+func NewKey(ctx context.Context, kind, name string, id int64, parent *Key) *Key { + return &Key{ + kind: kind, + name: name, + id: id, + parent: parent, + namespace: ctxNamespace(ctx), + } +} + +// AllocateIDs accepts a slice of incomplete keys and returns a +// slice of complete keys that are guaranteed to be valid in the datastore. +func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) { + if keys == nil { + return nil, nil + } + + req := &pb.AllocateIdsRequest{Key: multiKeyToProto(keys)} + res := &pb.AllocateIdsResponse{} + if err := c.call(ctx, "allocateIds", req, res); err != nil { + return nil, err + } + + return multiProtoToKey(res.Key), nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/key_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/key_test.go new file mode 100644 index 000000000..e134ffd58 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/key_test.go @@ -0,0 +1,223 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestNamespace(t *testing.T) { + c := context.Background() + k := NewIncompleteKey(c, "foo", nil) + if got, want := k.Namespace(), ""; got != want { + t.Errorf("No namespace, k.Namespace() = %q, want %q", got, want) + } + + c = WithNamespace(c, "gopherspace") + k = NewIncompleteKey(c, "foo", nil) + if got, want := k.Namespace(), "gopherspace"; got != want { + t.Errorf("Namespace set, k.Namespace() = %q, want %q", got, want) + } +} + +func TestParent(t *testing.T) { + c := context.Background() + k := NewIncompleteKey(c, "foo", nil) + par := NewKey(c, "foomum", "", 1248, nil) + k.SetParent(par) + if got := k.Parent(); got != par { + t.Errorf("k.Parent() = %v; want %v", got, par) + } +} + +func TestEqual(t *testing.T) { + c := context.Background() + cN := WithNamespace(c, "gopherspace") + + testCases := []struct { + x, y *Key + equal bool + }{ + { + x: nil, + y: nil, + equal: true, + }, + { + x: NewKey(c, "kindA", "", 0, nil), + y: NewIncompleteKey(c, "kindA", nil), + equal: true, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindA", "nameA", 0, nil), + equal: true, + }, + { + x: NewKey(cN, "kindA", "nameA", 0, nil), + y: NewKey(cN, "kindA", "nameA", 0, nil), + equal: true, + }, + { + x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + equal: true, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindB", "nameA", 0, nil), + equal: false, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindA", "nameB", 0, nil), + equal: false, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindA", "", 1337, nil), + equal: false, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(cN, "kindA", "nameA", 0, nil), + equal: false, +
}, + { + x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindY", "nameX", 0, nil)), + equal: false, + }, + { + x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + y: NewKey(c, "kindA", "", 1337, nil), + equal: false, + }, + } + + for _, tt := range testCases { + if got := tt.x.Equal(tt.y); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal) + } + if got := tt.y.Equal(tt.x); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal) + } + } +} + +func TestEncoding(t *testing.T) { + c := context.Background() + cN := WithNamespace(c, "gopherspace") + + testCases := []struct { + k *Key + valid bool + }{ + { + k: nil, + valid: false, + }, + { + k: NewKey(c, "", "", 0, nil), + valid: false, + }, + { + k: NewKey(c, "kindA", "", 0, nil), + valid: true, + }, + { + k: NewKey(cN, "kindA", "", 0, nil), + valid: true, + }, + { + k: NewKey(c, "kindA", "nameA", 0, nil), + valid: true, + }, + { + k: NewKey(c, "kindA", "", 1337, nil), + valid: true, + }, + { + k: NewKey(c, "kindA", "nameA", 1337, nil), + valid: false, + }, + { + k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "nameB", 0, nil)), + valid: true, + }, + { + k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "", 0, nil)), + valid: false, + }, + { + k: NewKey(c, "kindA", "", 0, NewKey(cN, "kindB", "nameB", 0, nil)), + valid: false, + }, + } + + for _, tt := range testCases { + if got := tt.k.valid(); got != tt.valid { + t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid) + } + + // Check encoding/decoding for valid keys. + if !tt.valid { + continue + } + enc := tt.k.Encode() + dec, err := DecodeKey(enc) + if err != nil { + t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err) + continue + } + if !tt.k.Equal(dec) { + t.Errorf("Decoded key %v not equal to %v", dec, tt.k) + } + + b, err := json.Marshal(tt.k) + if err != nil { + t.Errorf("json.Marshal(%v): %v", tt.k, err) + continue + } + key := &Key{} + if err := json.Unmarshal(b, key); err != nil { + t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err) + continue + } + if !tt.k.Equal(key) { + t.Errorf("JSON decoded key %v not equal to %v", key, tt.k) + } + + buf := &bytes.Buffer{} + gobEnc := gob.NewEncoder(buf) + if err := gobEnc.Encode(tt.k); err != nil { + t.Errorf("gobEnc.Encode(%v): %v", tt.k, err) + continue + } + gobDec := gob.NewDecoder(buf) + key = &Key{} + if err := gobDec.Decode(key); err != nil { + t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err) + } + if !tt.k.Equal(key) { + t.Errorf("gob decoded key %v not equal to %v", key, tt.k) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/load.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/load.go new file mode 100644 index 000000000..ef2e211bf --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/load.go @@ -0,0 +1,275 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "time" + + pb "google.golang.org/cloud/internal/datastore" +) + +var ( + typeOfByteSlice = reflect.TypeOf([]byte(nil)) + typeOfTime = reflect.TypeOf(time.Time{}) +) + +// typeMismatchReason returns a string explaining why the property p could not +// be stored in an entity field of type v.Type(). +func typeMismatchReason(p Property, v reflect.Value) string { + entityType := "empty" + switch p.Value.(type) { + case int64: + entityType = "int" + case bool: + entityType = "bool" + case string: + entityType = "string" + case float64: + entityType = "float" + case *Key: + entityType = "*datastore.Key" + case time.Time: + entityType = "time.Time" + case []byte: + entityType = "[]byte" + } + + return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) +} + +type propertyLoader struct { + // m holds the number of times a substruct field like "Foo.Bar.Baz" has + // been seen so far. The map is constructed lazily. + m map[string]int +} + +func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string { + var sliceOk bool + var v reflect.Value + // Traverse a struct's struct-typed fields. + for name := p.Name; ; { + decoder, ok := codec.byName[name] + if !ok { + return "no such struct field" + } + v = structValue.Field(decoder.index) + if !v.IsValid() { + return "no such struct field" + } + if !v.CanSet() { + return "cannot set struct field" + } + + if decoder.substructCodec == nil { + break + } + + if v.Kind() == reflect.Slice { + if l.m == nil { + l.m = make(map[string]int) + } + index := l.m[p.Name] + l.m[p.Name] = index + 1 + for v.Len() <= index { + v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) + } + structValue = v.Index(index) + sliceOk = true + } else { + structValue = v + } + // Strip the "I." from "I.X". 
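+ // (Illustratively: a property named "I.X" arriving at a struct with a + // substruct field "I" drops the "I." prefix here and continues the walk + // with the substruct's codec.)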
+ name = name[len(codec.byIndex[decoder.index].name):] + codec = decoder.substructCodec + } + + var slice reflect.Value + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + slice = v + v = reflect.New(v.Type().Elem()).Elem() + } else if _, ok := prev[p.Name]; ok && !sliceOk { + // The property has appeared before, but the field is not a slice: + // zero out the value set by the earlier occurrence and report the mismatch. + v.Set(reflect.Zero(v.Type())) + + return "multiple-valued property requires a slice field type" + } + + prev[p.Name] = struct{}{} + + pValue := p.Value + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x, ok := pValue.(int64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowInt(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetInt(x) + case reflect.Bool: + x, ok := pValue.(bool) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.SetBool(x) + case reflect.String: + x, ok := pValue.(string) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.SetString(x) + case reflect.Float32, reflect.Float64: + x, ok := pValue.(float64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowFloat(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetFloat(x) + case reflect.Ptr: + x, ok := pValue.(*Key) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if _, ok := v.Interface().(*Key); !ok { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case reflect.Struct: + switch v.Type() { + case typeOfTime: + x, ok := pValue.(time.Time) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + default: + return typeMismatchReason(p, v) + } + case reflect.Slice: + x, ok := pValue.([]byte) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.Type().Elem().Kind() != reflect.Uint8 { + return typeMismatchReason(p, v) + } + v.SetBytes(x) + default: + return typeMismatchReason(p, v) + } + if slice.IsValid() { + slice.Set(reflect.Append(slice, v)) + } + return "" +} + +// loadEntity loads a pb.Entity into a PropertyLoadSaver or struct pointer. +func loadEntity(dst interface{}, src *pb.Entity) (err error) { + props := protoToProperties(src) + if e, ok := dst.(PropertyLoadSaver); ok { + return e.Load(props) + } + return LoadStruct(dst, props) +} + +func (s structPLS) Load(props []Property) error { + var fieldName, reason string + var l propertyLoader + + prev := make(map[string]struct{}) + for _, p := range props { + if errStr := l.load(s.codec, s.v, p, prev); errStr != "" { + // We don't return early, as we try to load as many properties as possible. + // It is valid to load an entity into a struct that cannot fully represent it. + // That case returns an error, but the caller is free to ignore it.
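+ // Only the last failure is recorded: fieldName and reason are + // overwritten on each bad property, so the ErrFieldMismatch returned + // below describes the final field that failed to load.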
+ fieldName, reason = p.Name, errStr + } + } + if reason != "" { + return &ErrFieldMismatch{ + StructType: s.v.Type(), + FieldName: fieldName, + Reason: reason, + } + } + return nil +} + +func protoToProperties(src *pb.Entity) []Property { + props := src.Property + out := make([]Property, 0, len(props)) + for { + var ( + x *pb.Property + noIndex bool + ) + if len(props) > 0 { + x, props = props[0], props[1:] + noIndex = !x.GetValue().GetIndexed() + } else { + break + } + + if x.Value.ListValue == nil { + out = append(out, Property{ + Name: x.GetName(), + Value: propValue(x.Value), + NoIndex: noIndex, + Multiple: false, + }) + } else { + for _, v := range x.Value.ListValue { + out = append(out, Property{ + Name: x.GetName(), + Value: propValue(v), + NoIndex: noIndex, + Multiple: true, + }) + } + } + } + return out +} + +// propValue returns a Go value that combines the raw PropertyValue with a +// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. +func propValue(v *pb.Value) interface{} { + //TODO(PSG-Luna): Support EntityValue + //TODO(PSG-Luna): GeoPoint seems gone from the v1 proto, reimplement it once it's readded + switch { + case v.IntegerValue != nil: + return *v.IntegerValue + case v.TimestampMicrosecondsValue != nil: + return fromUnixMicro(*v.TimestampMicrosecondsValue) + case v.BooleanValue != nil: + return *v.BooleanValue + case v.StringValue != nil: + return *v.StringValue + case v.BlobValue != nil: + return []byte(v.BlobValue) + case v.BlobKeyValue != nil: + return *v.BlobKeyValue + case v.DoubleValue != nil: + return *v.DoubleValue + case v.KeyValue != nil: + return protoToKey(v.KeyValue) + } + return nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/prop.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/prop.go new file mode 100644 index 000000000..43b6c2293 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/prop.go @@ -0,0 +1,300 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "sync" + "unicode" +) + +// Entities with more than this many indexed properties will not be saved. +const maxIndexedProperties = 5000 + +// []byte fields more than 1 megabyte long will not be loaded or saved. +const maxBlobLen = 1 << 20 + +// Property is a name/value pair plus some metadata. A datastore entity's +// contents are loaded and saved as a sequence of Properties. An entity can +// have multiple Properties with the same name, provided that p.Multiple is +// true on all of that entity's Properties with that name. +type Property struct { + // Name is the property name. + Name string + // Value is the property value. The valid types are: + // - int64 + // - bool + // - string + // - float64 + // - *Key + // - time.Time + // - []byte (up to 1 megabyte in length) + // This set is smaller than the set of valid struct field types that the + // datastore can load and save. 
A Property Value cannot be a slice (apart + // from []byte); use multiple Properties instead. Also, a Value's type + // must be explicitly on the list above; it is not sufficient for the + // underlying type to be on that list. For example, a Value of "type + // myInt64 int64" is invalid. Smaller-width integers and floats are also + // invalid. Again, this is more restrictive than the set of valid struct + // field types. + // + // A Value will have an opaque type when loading entities from an index, + // such as via a projection query. Load entities into a struct instead + // of a PropertyLoadSaver when using a projection query. + // + // A Value may also be the nil interface value; this is equivalent to + // Python's None but not directly representable by a Go struct. Loading + // a nil-valued property into a struct will set that field to the zero + // value. + Value interface{} + // NoIndex is whether the datastore cannot index this property. + // If NoIndex is set to false, []byte values are limited to 1500 bytes and + // string values are limited to 1500 bytes. + NoIndex bool + // Multiple is whether the entity can have multiple properties with + // the same name. Even if a particular instance only has one property with + // a certain name, Multiple should be true if a struct would best represent + // it as a field of type []T instead of type T. + Multiple bool +} + +// PropertyLoadSaver can be converted from and to a slice of Properties. +type PropertyLoadSaver interface { + Load([]Property) error + Save() ([]Property, error) +} + +// PropertyList converts a []Property to implement PropertyLoadSaver. +type PropertyList []Property + +var ( + typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() + typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) +) + +// Load loads all of the provided properties into l. +// It does not first reset *l to an empty slice. +func (l *PropertyList) Load(p []Property) error { + *l = append(*l, p...) + return nil +} + +// Save saves all of l's properties as a slice of Properties. +func (l *PropertyList) Save() ([]Property, error) { + return *l, nil +} + +// validPropertyName returns whether name consists of one or more valid Go +// identifiers joined by ".". +func validPropertyName(name string) bool { + if name == "" { + return false + } + for _, s := range strings.Split(name, ".") { + if s == "" { + return false + } + first := true + for _, c := range s { + if first { + first = false + if c != '_' && !unicode.IsLetter(c) { + return false + } + } else { + if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + } + return true +} + +// structTag is the parsed `datastore:"name,options"` tag of a struct field. +// If a field has no tag, or the tag has an empty name, then the structTag's +// name is just the field name. A "-" name means that the datastore ignores +// that field. +type structTag struct { + name string + noIndex bool +} + +// structCodec describes how to convert a struct to and from a sequence of +// properties. +type structCodec struct { + // byIndex gives the structTag for the i'th field. + byIndex []structTag + // byName gives the field codec for the structTag with the given name. + byName map[string]fieldCodec + // hasSlice is whether a struct or any of its nested or embedded structs + // has a slice-typed field (other than []byte). + hasSlice bool + // complete is whether the structCodec is complete. An incomplete + // structCodec may be encountered when walking a recursive struct. 
+ complete bool +} + +// fieldCodec is a struct field's index and, if that struct field's type is +// itself a struct, that substruct's structCodec. +type fieldCodec struct { + index int + substructCodec *structCodec +} + +// structCodecs collects the structCodecs that have already been calculated. +var ( + structCodecsMutex sync.Mutex + structCodecs = make(map[reflect.Type]*structCodec) +) + +// getStructCodec returns the structCodec for the given struct type. +func getStructCodec(t reflect.Type) (*structCodec, error) { + structCodecsMutex.Lock() + defer structCodecsMutex.Unlock() + return getStructCodecLocked(t) +} + +// getStructCodecLocked implements getStructCodec. The structCodecsMutex must +// be held when calling this function. +func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { + c, ok := structCodecs[t] + if ok { + return c, nil + } + c = &structCodec{ + byIndex: make([]structTag, t.NumField()), + byName: make(map[string]fieldCodec), + } + + // Add c to the structCodecs map before we are sure it is good. If t is + // a recursive type, it needs to find the incomplete entry for itself in + // the map. + structCodecs[t] = c + defer func() { + if retErr != nil { + delete(structCodecs, t) + } + }() + + for i := range c.byIndex { + f := t.Field(i) + name, opts := f.Tag.Get("datastore"), "" + if i := strings.Index(name, ","); i != -1 { + name, opts = name[:i], name[i+1:] + } + if name == "" { + if !f.Anonymous { + name = f.Name + } + } else if name == "-" { + c.byIndex[i] = structTag{name: name} + continue + } else if !validPropertyName(name) { + return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) + } + + substructType, fIsSlice := reflect.Type(nil), false + switch f.Type.Kind() { + case reflect.Struct: + substructType = f.Type + case reflect.Slice: + if f.Type.Elem().Kind() == reflect.Struct { + substructType = f.Type.Elem() + } + fIsSlice = f.Type != typeOfByteSlice + c.hasSlice = c.hasSlice || fIsSlice + } + + if substructType != nil && substructType != typeOfTime { + if name != "" { + name = name + "." + } + sub, err := getStructCodecLocked(substructType) + if err != nil { + return nil, err + } + if !sub.complete { + return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) + } + if fIsSlice && sub.hasSlice { + return nil, fmt.Errorf( + "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) + } + c.hasSlice = c.hasSlice || sub.hasSlice + for relName := range sub.byName { + absName := name + relName + if _, ok := c.byName[absName]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName) + } + c.byName[absName] = fieldCodec{index: i, substructCodec: sub} + } + } else { + if _, ok := c.byName[name]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) + } + c.byName[name] = fieldCodec{index: i} + } + + c.byIndex[i] = structTag{ + name: name, + noIndex: opts == "noindex", + } + } + c.complete = true + return c, nil +} + +// structPLS adapts a struct to be a PropertyLoadSaver. +type structPLS struct { + v reflect.Value + codec *structCodec +} + +// newStructPLS returns a PropertyLoadSaver for the struct pointer p. 
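+//
+// Illustrative sketch only (added; the Widget type here is hypothetical and
+// not part of this package): the returned PropertyLoadSaver round-trips a
+// struct through a []Property:
+//
+//	type Widget struct{ Name string }
+//	pls, _ := newStructPLS(&Widget{Name: "w"})
+//	props, _ := pls.Save() // []Property{{Name: "Name", Value: "w"}}
+//	_ = pls.Load(props)    // writes the values back into the struct fields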
+func newStructPLS(p interface{}) (PropertyLoadSaver, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidEntityType + } + v = v.Elem() + codec, err := getStructCodec(v.Type()) + if err != nil { + return nil, err + } + return structPLS{v, codec}, nil +} + +// LoadStruct loads the properties from p to dst. +// dst must be a struct pointer. +func LoadStruct(dst interface{}, p []Property) error { + x, err := newStructPLS(dst) + if err != nil { + return err + } + return x.Load(p) +} + +// SaveStruct returns the properties from src as a slice of Properties. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Property, error) { + x, err := newStructPLS(src) + if err != nil { + return nil, err + } + return x.Save() +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/query.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/query.go new file mode 100644 index 000000000..3eb8a8500 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/query.go @@ -0,0 +1,755 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/cloud/internal/datastore" +) + +type operator int + +const ( + lessThan operator = iota + lessEq + equal + greaterEq + greaterThan + + keyFieldName = "__key__" +) + +var operatorToProto = map[operator]*pb.PropertyFilter_Operator{ + lessThan: pb.PropertyFilter_LESS_THAN.Enum(), + lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL.Enum(), + equal: pb.PropertyFilter_EQUAL.Enum(), + greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL.Enum(), + greaterThan: pb.PropertyFilter_GREATER_THAN.Enum(), +} + +// filter is a conditional filter on query results. +type filter struct { + FieldName string + Op operator + Value interface{} +} + +type sortDirection int + +const ( + ascending sortDirection = iota + descending +) + +var sortDirectionToProto = map[sortDirection]*pb.PropertyOrder_Direction{ + ascending: pb.PropertyOrder_ASCENDING.Enum(), + descending: pb.PropertyOrder_DESCENDING.Enum(), +} + +// order is a sort order on query results. +type order struct { + FieldName string + Direction sortDirection +} + +// NewQuery creates a new Query for a specific entity kind. +// +// An empty kind means to return all entities, including entities created and +// managed by other App Engine features, and is called a kindless query. +// Kindless queries cannot include filters or sort orders on property values. +func NewQuery(kind string) *Query { + return &Query{ + kind: kind, + limit: -1, + } +} + +// Query represents a datastore query. 
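+//
+// For illustration only (example added, not from the original source): a
+// query is built by chaining the derivative methods defined below, each of
+// which returns a modified copy:
+//
+//	q := NewQuery("Gopher").
+//		Filter("Height >", 10).
+//		Order("-Height").
+//		Limit(5)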
+type Query struct { + kind string + ancestor *Key + filter []filter + order []order + projection []string + + distinct bool + keysOnly bool + eventual bool + limit int32 + offset int32 + start []byte + end []byte + + trans *Transaction + + err error +} + +func (q *Query) clone() *Query { + x := *q + // Copy the contents of the slice-typed fields to a new backing store. + if len(q.filter) > 0 { + x.filter = make([]filter, len(q.filter)) + copy(x.filter, q.filter) + } + if len(q.order) > 0 { + x.order = make([]order, len(q.order)) + copy(x.order, q.order) + } + return &x +} + +// Ancestor returns a derivative query with an ancestor filter. +// The ancestor should not be nil. +func (q *Query) Ancestor(ancestor *Key) *Query { + q = q.clone() + if ancestor == nil { + q.err = errors.New("datastore: nil query ancestor") + return q + } + q.ancestor = ancestor + return q +} + +// EventualConsistency returns a derivative query that returns eventually +// consistent results. +// It only has an effect on ancestor queries. +func (q *Query) EventualConsistency() *Query { + q = q.clone() + q.eventual = true + return q +} + +// Transaction returns a derivative query that is associated with the given +// transaction. +// +// All reads performed as part of the transaction will come from a single +// consistent snapshot. Furthermore, if the transaction is set to a +// serializable isolation level, another transaction cannot concurrently modify +// the data that is read or modified by this transaction. +func (q *Query) Transaction(t *Transaction) *Query { + q = q.clone() + q.trans = t + return q +} + +// Filter returns a derivative query with a field-based filter. +// The filterStr argument must be a field name followed by optional space, +// followed by an operator, one of ">", "<", ">=", "<=", or "=". +// Fields are compared against the provided value using the operator. +// Multiple filters are AND'ed together. +// Field names which contain spaces, quote marks, or operator characters +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Filter(filterStr string, value interface{}) *Query { + q = q.clone() + filterStr = strings.TrimSpace(filterStr) + if filterStr == "" { + q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) + return q + } + f := filter{ + FieldName: strings.TrimRight(filterStr, " ><=!"), + Value: value, + } + switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { + case "<=": + f.Op = lessEq + case ">=": + f.Op = greaterEq + case "<": + f.Op = lessThan + case ">": + f.Op = greaterThan + case "=": + f.Op = equal + default: + q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) + return q + } + var err error + f.FieldName, err = unquote(f.FieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) + return q + } + q.filter = append(q.filter, f) + return q +} + +// Order returns a derivative query with a field-based sort order. Orders are +// applied in the order they are added. The default order is ascending; to sort +// in descending order prefix the fieldName with a minus sign (-). +// Field names which contain spaces, quote marks, or the minus sign +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. 
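+//
+// For example (added illustration):
+//
+//	q.Order("Name")                  // ascending by Name
+//	q.Order("-Height")               // descending by Height
+//	q.Order(strconv.Quote("a name")) // a field name containing a space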
+func (q *Query) Order(fieldName string) *Query { + q = q.clone() + fieldName, dir := strings.TrimSpace(fieldName), ascending + if strings.HasPrefix(fieldName, "-") { + fieldName, dir = strings.TrimSpace(fieldName[1:]), descending + } else if strings.HasPrefix(fieldName, "+") { + q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) + return q + } + fieldName, err := unquote(fieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) + return q + } + if fieldName == "" { + q.err = errors.New("datastore: empty order") + return q + } + q.order = append(q.order, order{ + Direction: dir, + FieldName: fieldName, + }) + return q +} + +// unquote optionally interprets s as a double-quoted or backquoted Go +// string literal if it begins with the relevant character. +func unquote(s string) (string, error) { + if s == "" || (s[0] != '`' && s[0] != '"') { + return s, nil + } + return strconv.Unquote(s) +} + +// Project returns a derivative query that yields only the given fields. It +// cannot be used with KeysOnly. +func (q *Query) Project(fieldNames ...string) *Query { + q = q.clone() + q.projection = append([]string(nil), fieldNames...) + return q +} + +// Distinct returns a derivative query that yields de-duplicated entities with +// respect to the set of projected fields. It is only used for projection +// queries. +func (q *Query) Distinct() *Query { + q = q.clone() + q.distinct = true + return q +} + +// KeysOnly returns a derivative query that yields only keys, not keys and +// entities. It cannot be used with projection queries. +func (q *Query) KeysOnly() *Query { + q = q.clone() + q.keysOnly = true + return q +} + +// Limit returns a derivative query that has a limit on the number of results +// returned. A negative value means unlimited. +func (q *Query) Limit(limit int) *Query { + q = q.clone() + if limit < math.MinInt32 || limit > math.MaxInt32 { + q.err = errors.New("datastore: query limit overflow") + return q + } + q.limit = int32(limit) + return q +} + +// Offset returns a derivative query that has an offset of how many keys to +// skip over before returning results. A negative value is invalid. +func (q *Query) Offset(offset int) *Query { + q = q.clone() + if offset < 0 { + q.err = errors.New("datastore: negative query offset") + return q + } + if offset > math.MaxInt32 { + q.err = errors.New("datastore: query offset overflow") + return q + } + q.offset = int32(offset) + return q +} + +// Start returns a derivative query with the given start point. +func (q *Query) Start(c Cursor) *Query { + q = q.clone() + if c.cc == nil { + q.err = errors.New("datastore: invalid cursor") + return q + } + q.start = c.cc + return q +} + +// End returns a derivative query with the given end point. +func (q *Query) End(c Cursor) *Query { + q = q.clone() + if c.cc == nil { + q.err = errors.New("datastore: invalid cursor") + return q + } + q.end = c.cc + return q +} + +// toProto converts the query to a protocol buffer. 
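+// (Added note: kind, projection, filters, orders, limit, offset and cursors
+// are copied into a pb.Query; multiple filters are AND'ed together under a
+// pb.CompositeFilter, and an ancestor constraint becomes a HAS_ANCESTOR
+// filter on "__key__".)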
+func (q *Query) toProto(req *pb.RunQueryRequest) error { + dst := pb.Query{} + if len(q.projection) != 0 && q.keysOnly { + return errors.New("datastore: query cannot both project and be keys-only") + } + dst.Reset() + if q.kind != "" { + dst.Kind = []*pb.KindExpression{&pb.KindExpression{Name: proto.String(q.kind)}} + } + if q.projection != nil { + for _, propertyName := range q.projection { + dst.Projection = append(dst.Projection, &pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(propertyName)}}) + } + + if q.distinct { + for _, propertyName := range q.projection { + dst.GroupBy = append(dst.GroupBy, &pb.PropertyReference{Name: proto.String(propertyName)}) + } + } + } + if q.keysOnly { + dst.Projection = []*pb.PropertyExpression{&pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(keyFieldName)}}} + } + + var filters []*pb.Filter + for _, qf := range q.filter { + if qf.FieldName == "" { + return errors.New("datastore: empty query filter field name") + } + v, errStr := interfaceToProto(reflect.ValueOf(qf.Value).Interface()) + if errStr != "" { + return errors.New("datastore: bad query filter value type: " + errStr) + } + xf := &pb.PropertyFilter{ + Operator: operatorToProto[qf.Op], + Property: &pb.PropertyReference{Name: proto.String(qf.FieldName)}, + Value: v, + } + if xf.Operator == nil { + return errors.New("datastore: unknown query filter operator") + } + filters = append(filters, &pb.Filter{PropertyFilter: xf}) + } + + if q.ancestor != nil { + filters = append(filters, &pb.Filter{ + PropertyFilter: &pb.PropertyFilter{ + Property: &pb.PropertyReference{Name: proto.String("__key__")}, + Operator: pb.PropertyFilter_HAS_ANCESTOR.Enum(), + Value: &pb.Value{KeyValue: keyToProto(q.ancestor)}, + }}) + } + + if len(filters) == 1 { + dst.Filter = filters[0] + } else if len(filters) > 1 { + dst.Filter = &pb.Filter{CompositeFilter: &pb.CompositeFilter{ + Operator: pb.CompositeFilter_AND.Enum(), + Filter: filters, + }} + } + + for _, qo := range q.order { + if qo.FieldName == "" { + return errors.New("datastore: empty query order field name") + } + xo := &pb.PropertyOrder{ + Property: &pb.PropertyReference{Name: proto.String(qo.FieldName)}, + Direction: sortDirectionToProto[qo.Direction], + } + if xo.Direction == nil { + return errors.New("datastore: unknown query order direction") + } + dst.Order = append(dst.Order, xo) + } + if q.limit >= 0 { + dst.Limit = proto.Int32(q.limit) + } + if q.offset != 0 { + dst.Offset = proto.Int32(q.offset) + } + dst.StartCursor = q.start + dst.EndCursor = q.end + + if t := q.trans; t != nil { + if t.id == nil { + return errExpiredTransaction + } + req.ReadOptions = &pb.ReadOptions{Transaction: t.id} + } + + req.Query = &dst + return nil +} + +// Count returns the number of results for the given query. +func (c *Client) Count(ctx context.Context, q *Query) (int, error) { + // Check that the query is well-formed. + if q.err != nil { + return 0, q.err + } + + // Run a copy of the query, with keysOnly true (if we're not a projection, + // since the two are incompatible). 
+ newQ := q.clone() + newQ.keysOnly = len(newQ.projection) == 0 + req := &pb.RunQueryRequest{} + + if ns := ctxNamespace(ctx); ns != "" { + req.PartitionId = &pb.PartitionId{ + Namespace: proto.String(ns), + } + } + if err := newQ.toProto(req); err != nil { + return 0, err + } + res := &pb.RunQueryResponse{} + if err := c.call(ctx, "runQuery", req, res); err != nil { + return 0, err + } + var n int + b := res.Batch + for { + n += len(b.GetEntityResult()) + if b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED { + break + } + var err error + // TODO(jbd): Support count queries that have a limit and an offset. + if err = callNext(ctx, c, req, res, 0, 0); err != nil { + return 0, err + } + } + return int(n), nil +} + +func callNext(ctx context.Context, client *Client, req *pb.RunQueryRequest, res *pb.RunQueryResponse, offset, limit int32) error { + if res.GetBatch().EndCursor == nil { + return errors.New("datastore: internal error: server did not return a cursor") + } + req.Query.StartCursor = res.GetBatch().GetEndCursor() + if limit >= 0 { + req.Query.Limit = proto.Int32(limit) + } + if offset != 0 { + req.Query.Offset = proto.Int32(offset) + } + res.Reset() + return client.call(ctx, "runQuery", req, res) +} + +// GetAll runs the provided query in the given context and returns all keys +// that match that query, as well as appending the values to dst. +// +// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- +// interface, non-pointer type P such that P or *P implements PropertyLoadSaver. +// +// As a special case, *PropertyList is an invalid type for dst, even though a +// PropertyList is a slice of structs. It is treated as invalid to avoid being +// mistakenly passed when *[]PropertyList was intended. +// +// The keys returned by GetAll will be in a 1-1 correspondence with the entities +// added to dst. +// +// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. +func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) { + var ( + dv reflect.Value + mat multiArgType + elemType reflect.Type + errFieldMismatch error + ) + if !q.keysOnly { + dv = reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return nil, ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType = checkMultiArg(dv) + if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { + return nil, ErrInvalidEntityType + } + } + + var keys []*Key + for t := c.Run(ctx, q); ; { + k, e, err := t.next() + if err == Done { + break + } + if err != nil { + return keys, err + } + if !q.keysOnly { + ev := reflect.New(elemType) + if elemType.Kind() == reflect.Map { + // This is a special case. The zero values of a map type are + // not immediately useful; they have to be make'd. + // + // Funcs and channels are similar, in that a zero value is not useful, + // but even a freshly make'd channel isn't useful: there's no fixed + // channel buffer size that is always going to be large enough, and + // there's no goroutine to drain the other end. Theoretically, these + // types could be supported, for example by sniffing for a constructor + // method or requiring prior registration, but for now it's not a + // frequent enough concern to be worth it. Programmers can work around + // it by explicitly using Iterator.Next instead of the Query.GetAll + // convenience method. 
+				x := reflect.MakeMap(elemType)
+				ev.Elem().Set(x)
+			}
+			if err = loadEntity(ev.Interface(), e); err != nil {
+				if _, ok := err.(*ErrFieldMismatch); ok {
+					// We continue loading entities even in the face of field mismatch errors.
+					// If we encounter any other error, that other error is returned. Otherwise,
+					// an ErrFieldMismatch is returned.
+					errFieldMismatch = err
+				} else {
+					return keys, err
+				}
+			}
+			if mat != multiArgTypeStructPtr {
+				ev = ev.Elem()
+			}
+			dv.Set(reflect.Append(dv, ev))
+		}
+		keys = append(keys, k)
+	}
+	return keys, errFieldMismatch
+}
+
+// Run runs the given query in the given context.
+func (c *Client) Run(ctx context.Context, q *Query) *Iterator {
+	if q.err != nil {
+		return &Iterator{err: q.err}
+	}
+	t := &Iterator{
+		ctx:    ctx,
+		client: c,
+		limit:  q.limit,
+		q:      q,
+		prevCC: q.start,
+	}
+	t.req.Reset()
+	if ns := ctxNamespace(ctx); ns != "" {
+		t.req.PartitionId = &pb.PartitionId{
+			Namespace: proto.String(ns),
+		}
+	}
+	if err := q.toProto(&t.req); err != nil {
+		t.err = err
+		return t
+	}
+	if err := c.call(ctx, "runQuery", &t.req, &t.res); err != nil {
+		t.err = err
+		return t
+	}
+	b := t.res.GetBatch()
+	offset := q.offset - b.GetSkippedResults()
+	for offset > 0 && b.GetMoreResults() == pb.QueryResultBatch_NOT_FINISHED {
+		t.prevCC = b.GetEndCursor()
+		var err error
+		if err = callNext(t.ctx, c, &t.req, &t.res, offset, t.limit); err != nil {
+			t.err = err
+			break
+		}
+		skip := b.GetSkippedResults()
+		if skip < 0 {
+			t.err = errors.New("datastore: internal error: negative number of skipped_results")
+			break
+		}
+		offset -= skip
+	}
+	if offset < 0 {
+		t.err = errors.New("datastore: internal error: query offset was overshot")
+	}
+	return t
+}
+
+// Iterator is the result of running a query.
+type Iterator struct {
+	ctx    context.Context
+	client *Client
+	err    error
+	// req is the request we sent previously; we keep track of it so that we
+	// can resend it when fetching further batches.
+	req pb.RunQueryRequest
+	// res is the result of the most recent RunQuery or Next API call.
+	res pb.RunQueryResponse
+	// i is how many elements of res.Result we have iterated over.
+	i int
+	// limit is the limit on the number of results this iterator should return.
+	// A negative value means unlimited.
+	limit int32
+	// q is the original query which yielded this iterator.
+	q *Query
+	// prevCC is the compiled cursor that marks the end of the previous batch
+	// of results.
+	prevCC []byte
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("datastore: query has no more results")
+
+// Next returns the key of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// If the query is not keys only and dst is non-nil, it also loads the entity
+// stored for that key into the struct pointer or PropertyLoadSaver dst, with
+// the same semantics and possible errors as for the Get function.
+func (t *Iterator) Next(dst interface{}) (*Key, error) {
+	k, e, err := t.next()
+	if err != nil {
+		return nil, err
+	}
+	if dst != nil && !t.q.keysOnly {
+		err = loadEntity(dst, e)
+	}
+	return k, err
+}
+
+func (t *Iterator) next() (*Key, *pb.Entity, error) {
+	if t.err != nil {
+		return nil, nil, t.err
+	}
+
+	// Issue further runQuery RPCs as necessary to fetch the next batch.
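+	// (Added note: each response carries one QueryResultBatch; while the
+	// batch reports NOT_FINISHED, callNext re-issues the query from the
+	// batch's end cursor.)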
+	b := t.res.GetBatch()
+	for t.i == len(b.EntityResult) {
+		if b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED {
+			t.err = Done
+			return nil, nil, t.err
+		}
+		t.prevCC = b.GetEndCursor()
+		if err := callNext(t.ctx, t.client, &t.req, &t.res, 0, t.limit); err != nil {
+			t.err = err
+			return nil, nil, t.err
+		}
+		if b.GetSkippedResults() != 0 {
+			t.err = errors.New("datastore: internal error: iterator has skipped results")
+			return nil, nil, t.err
+		}
+		t.i = 0
+		if t.limit >= 0 {
+			t.limit -= int32(len(b.EntityResult))
+			if t.limit < 0 {
+				t.err = errors.New("datastore: internal error: query returned more results than the limit")
+				return nil, nil, t.err
+			}
+		}
+	}
+
+	// Extract the key from the t.i'th element of t.res.Result.
+	e := b.EntityResult[t.i]
+	t.i++
+	if e.Entity.Key == nil {
+		return nil, nil, errors.New("datastore: internal error: server did not return a key")
+	}
+	k := protoToKey(e.Entity.Key)
+	if k.Incomplete() {
+		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+	}
+	return k, e.Entity, nil
+}
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (Cursor, error) {
+	if t.err != nil && t.err != Done {
+		return Cursor{}, t.err
+	}
+	// If we are at either end of the current batch of results,
+	// return the compiled cursor at that end.
+	b := t.res.Batch
+	skipped := b.GetSkippedResults()
+	if t.i == 0 && skipped == 0 {
+		if t.prevCC == nil {
+			// A nil pointer (of type *pb.CompiledCursor) means no constraint:
+			// passing it as the end cursor of a new query means unlimited results
+			// (glossing over the integer limit parameter for now).
+			// A non-nil pointer to an empty pb.CompiledCursor means the start:
+			// passing it as the end cursor of a new query means 0 results.
+			// If prevCC was nil, then the original query had no start cursor, but
+			// Iterator.Cursor should return "the start" instead of unlimited.
+			return Cursor{}, nil
+		}
+		return Cursor{t.prevCC}, nil
+	}
+	if t.i == len(b.EntityResult) {
+		return Cursor{b.EndCursor}, nil
+	}
+	// Otherwise, re-run the query offset to this iterator's position, starting from
+	// the most recent compiled cursor. This is done on a best-effort basis, as it
+	// is racy; if a concurrent process has added or removed entities, then the
+	// cursor returned may be inconsistent.
+	q := t.q.clone()
+	q.start = t.prevCC
+	q.offset = skipped + int32(t.i)
+	q.limit = 0
+	q.keysOnly = len(q.projection) == 0
+	t1 := t.client.Run(t.ctx, q)
+	_, _, err := t1.next()
+	if err != Done {
+		if err == nil {
+			err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
+		}
+		return Cursor{}, err
+	}
+	return Cursor{t1.res.Batch.EndCursor}, nil
+}
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+type Cursor struct {
+	cc []byte
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+	if c.cc == nil {
+		return ""
+	}
+
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
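+//
+// Round-trip illustration (added example, not from the original source):
+//
+//	c, _ := it.Cursor()      // it is an *Iterator
+//	s := c.String()          // opaque, padding-stripped base-64
+//	c2, _ := DecodeCursor(s) // c2 is equivalent to c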
+func DecodeCursor(s string) (Cursor, error) { + if s == "" { + return Cursor{}, nil + } + if n := len(s) % 4; n != 0 { + s += strings.Repeat("=", 4-n) + } + b, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return Cursor{}, err + } + return Cursor{b}, nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/query_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/query_test.go new file mode 100644 index 000000000..5d0caaff5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/query_test.go @@ -0,0 +1,482 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/cloud/internal/datastore" +) + +var ( + key1 = &pb.Key{ + PathElement: []*pb.Key_PathElement{ + { + Kind: proto.String("Gopher"), + Id: proto.Int64(6), + }, + }, + } + key2 = &pb.Key{ + PathElement: []*pb.Key_PathElement{ + { + Kind: proto.String("Gopher"), + Id: proto.Int64(6), + }, + { + Kind: proto.String("Gopher"), + Id: proto.Int64(8), + }, + }, + } +) + +type fakeClient func(req, resp proto.Message) (err error) + +func (c fakeClient) Call(ctx context.Context, method string, req, resp proto.Message) error { + return c(req, resp) +} + +func fakeRunQuery(in *pb.RunQueryRequest, out *pb.RunQueryResponse) error { + expectedIn := &pb.RunQueryRequest{ + Query: &pb.Query{ + Kind: []*pb.KindExpression{&pb.KindExpression{Name: proto.String("Gopher")}}, + }, + } + if !proto.Equal(in, expectedIn) { + return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) + } + *out = pb.RunQueryResponse{ + Batch: &pb.QueryResultBatch{ + MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS.Enum(), + EntityResultType: pb.EntityResult_FULL.Enum(), + EntityResult: []*pb.EntityResult{ + &pb.EntityResult{ + Entity: &pb.Entity{ + Key: key1, + Property: []*pb.Property{ + { + Name: proto.String("Name"), + Value: &pb.Value{StringValue: proto.String("George")}, + }, + { + Name: proto.String("Height"), + Value: &pb.Value{ + IntegerValue: proto.Int64(32), + }, + }, + }, + }, + }, + &pb.EntityResult{ + Entity: &pb.Entity{ + Key: key2, + Property: []*pb.Property{ + { + Name: proto.String("Name"), + Value: &pb.Value{StringValue: proto.String("Rufus")}, + }, + // No height for Rufus. 
+ }, + }, + }, + }, + }, + } + return nil +} + +type StructThatImplementsPLS struct{} + +func (StructThatImplementsPLS) Load(p []Property) error { return nil } +func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = StructThatImplementsPLS{} + +type StructPtrThatImplementsPLS struct{} + +func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } +func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} + +type PropertyMap map[string]Property + +func (m PropertyMap) Load(props []Property) error { + for _, p := range props { + m[p.Name] = p + } + return nil +} + +func (m PropertyMap) Save() ([]Property, error) { + props := make([]Property, 0, len(m)) + for _, p := range m { + props = append(props, p) + } + return props, nil +} + +var _ PropertyLoadSaver = PropertyMap{} + +type Gopher struct { + Name string + Height int +} + +// typeOfEmptyInterface is the type of interface{}, but we can't use +// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an +// interface{}. +var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() + +func TestCheckMultiArg(t *testing.T) { + testCases := []struct { + v interface{} + mat multiArgType + elemType reflect.Type + }{ + // Invalid cases. + {nil, multiArgTypeInvalid, nil}, + {Gopher{}, multiArgTypeInvalid, nil}, + {&Gopher{}, multiArgTypeInvalid, nil}, + {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. + {PropertyMap{}, multiArgTypeInvalid, nil}, + {[]*PropertyList(nil), multiArgTypeInvalid, nil}, + {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, + {[]**Gopher(nil), multiArgTypeInvalid, nil}, + {[]*interface{}(nil), multiArgTypeInvalid, nil}, + // Valid cases. + { + []PropertyList(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyList{}), + }, + { + []PropertyMap(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyMap{}), + }, + { + []StructThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructThatImplementsPLS{}), + }, + { + []StructPtrThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructPtrThatImplementsPLS{}), + }, + { + []Gopher(nil), + multiArgTypeStruct, + reflect.TypeOf(Gopher{}), + }, + { + []*Gopher(nil), + multiArgTypeStructPtr, + reflect.TypeOf(Gopher{}), + }, + { + []interface{}(nil), + multiArgTypeInterface, + typeOfEmptyInterface, + }, + } + for _, tc := range testCases { + mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) + if mat != tc.mat || elemType != tc.elemType { + t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", + tc.v, mat, elemType, tc.mat, tc.elemType) + } + } +} + +func TestSimpleQuery(t *testing.T) { + struct1 := Gopher{Name: "George", Height: 32} + struct2 := Gopher{Name: "Rufus"} + pList1 := PropertyList{ + { + Name: "Name", + Value: "George", + }, + { + Name: "Height", + Value: int64(32), + }, + } + pList2 := PropertyList{ + { + Name: "Name", + Value: "Rufus", + }, + } + pMap1 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "George", + }, + "Height": Property{ + Name: "Height", + Value: int64(32), + }, + } + pMap2 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "Rufus", + }, + } + + testCases := []struct { + dst interface{} + want interface{} + }{ + // The destination must have type *[]P, *[]S or *[]*S, for some non-interface + // type P such that *P implements PropertyLoadSaver, or for some struct type S. 
+ {new([]Gopher), &[]Gopher{struct1, struct2}}, + {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, + {new([]PropertyList), &[]PropertyList{pList1, pList2}}, + {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, + + // Any other destination type is invalid. + {0, nil}, + {Gopher{}, nil}, + {PropertyList{}, nil}, + {PropertyMap{}, nil}, + {[]int{}, nil}, + {[]Gopher{}, nil}, + {[]PropertyList{}, nil}, + {new(int), nil}, + {new(Gopher), nil}, + {new(PropertyList), nil}, // This is a special case. + {new(PropertyMap), nil}, + {new([]int), nil}, + {new([]map[int]int), nil}, + {new([]map[string]Property), nil}, + {new([]map[string]interface{}), nil}, + {new([]*int), nil}, + {new([]*map[int]int), nil}, + {new([]*map[string]Property), nil}, + {new([]*map[string]interface{}), nil}, + {new([]**Gopher), nil}, + {new([]*PropertyList), nil}, + {new([]*PropertyMap), nil}, + } + for _, tc := range testCases { + nCall := 0 + client := &Client{ + client: fakeClient(func(in, out proto.Message) error { + nCall++ + return fakeRunQuery(in.(*pb.RunQueryRequest), out.(*pb.RunQueryResponse)) + }), + } + ctx := context.Background() + + var ( + expectedErr error + expectedNCall int + ) + if tc.want == nil { + expectedErr = ErrInvalidEntityType + } else { + expectedNCall = 1 + } + keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst) + if err != expectedErr { + t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr) + continue + } + if nCall != expectedNCall { + t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) + continue + } + if err != nil { + continue + } + + key1 := NewKey(ctx, "Gopher", "", 6, nil) + expectedKeys := []*Key{ + key1, + NewKey(ctx, "Gopher", "", 8, key1), + } + if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { + t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) + continue + } + for i, key := range keys { + if !keysEqual(key, expectedKeys[i]) { + t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) + continue + } + } + + if !reflect.DeepEqual(tc.dst, tc.want) { + t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want) + continue + } + } +} + +// keysEqual is like (*Key).Equal, but ignores the App ID. +func keysEqual(a, b *Key) bool { + for a != nil && b != nil { + if a.Kind() != b.Kind() || a.Name() != b.Name() || a.ID() != b.ID() { + return false + } + a, b = a.Parent(), b.Parent() + } + return a == b +} + +func TestQueriesAreImmutable(t *testing.T) { + // Test that deriving q2 from q1 does not modify q1. + q0 := NewQuery("foo") + q1 := NewQuery("foo") + q2 := q1.Offset(2) + if !reflect.DeepEqual(q0, q1) { + t.Errorf("q0 and q1 were not equal") + } + if reflect.DeepEqual(q1, q2) { + t.Errorf("q1 and q2 were equal") + } + + // Test that deriving from q4 twice does not conflict, even though + // q4 has a long list of order clauses. This tests that the arrays + // backed by a query's slice of orders are not shared. + f := func() *Query { + q := NewQuery("bar") + // 47 is an ugly number that is unlikely to be near a re-allocation + // point in repeated append calls. For example, it's not near a power + // of 2 or a multiple of 10. 
+ for i := 0; i < 47; i++ { + q = q.Order(fmt.Sprintf("x%d", i)) + } + return q + } + q3 := f().Order("y") + q4 := f() + q5 := q4.Order("y") + q6 := q4.Order("z") + if !reflect.DeepEqual(q3, q5) { + t.Errorf("q3 and q5 were not equal") + } + if reflect.DeepEqual(q5, q6) { + t.Errorf("q5 and q6 were equal") + } +} + +func TestFilterParser(t *testing.T) { + testCases := []struct { + filterStr string + wantOK bool + wantFieldName string + wantOp operator + }{ + // Supported ops. + {"x<", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {" x < ", true, "x", lessThan}, + {"x <=", true, "x", lessEq}, + {"x =", true, "x", equal}, + {"x >=", true, "x", greaterEq}, + {"x >", true, "x", greaterThan}, + {"in >", true, "in", greaterThan}, + {"in>", true, "in", greaterThan}, + // Valid but (currently) unsupported ops. + {"x!=", false, "", 0}, + {"x !=", false, "", 0}, + {" x != ", false, "", 0}, + {"x IN", false, "", 0}, + {"x in", false, "", 0}, + // Invalid ops. + {"x EQ", false, "", 0}, + {"x lt", false, "", 0}, + {"x <>", false, "", 0}, + {"x >>", false, "", 0}, + {"x ==", false, "", 0}, + {"x =<", false, "", 0}, + {"x =>", false, "", 0}, + {"x !", false, "", 0}, + {"x ", false, "", 0}, + {"x", false, "", 0}, + // Quoted and interesting field names. + {"x > y =", true, "x > y", equal}, + {"` x ` =", true, " x ", equal}, + {`" x " =`, true, " x ", equal}, + {`" \"x " =`, true, ` "x `, equal}, + {`" x =`, false, "", 0}, + {`" x ="`, false, "", 0}, + {"` x \" =", false, "", 0}, + } + for _, tc := range testCases { + q := NewQuery("foo").Filter(tc.filterStr, 42) + if ok := q.err == nil; ok != tc.wantOK { + t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) + continue + } + if !tc.wantOK { + continue + } + if len(q.filter) != 1 { + t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) + continue + } + got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} + if got != want { + t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) + continue + } + } +} + +func TestNamespaceQuery(t *testing.T) { + gotNamespace := make(chan string, 1) + ctx := context.Background() + client := &Client{ + client: fakeClient(func(req, resp proto.Message) error { + gotNamespace <- req.(*pb.RunQueryRequest).GetPartitionId().GetNamespace() + return errors.New("not implemented") + }), + } + + var gs []Gopher + + client.GetAll(ctx, NewQuery("gopher"), &gs) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher")) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } + + const ns = "not_default" + ctx = WithNamespace(ctx, ns) + + client.GetAll(ctx, NewQuery("gopher"), &gs) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher")) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/save.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/save.go new file mode 100644 index 000000000..a208000aa --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/save.go @@ -0,0 +1,210 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	pb "google.golang.org/cloud/internal/datastore"
+)
+
+// saveEntity saves a PropertyLoadSaver or struct pointer into an Entity
+// protobuf.
+func saveEntity(key *Key, src interface{}) (*pb.Entity, error) {
+	var err error
+	var props []Property
+	if e, ok := src.(PropertyLoadSaver); ok {
+		props, err = e.Save()
+	} else {
+		props, err = SaveStruct(src)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return propertiesToProto(key, props)
+}
+
+func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {
+	p := Property{
+		Name:     name,
+		NoIndex:  noIndex,
+		Multiple: multiple,
+	}
+
+	switch x := v.Interface().(type) {
+	case *Key, time.Time:
+		p.Value = x
+	default:
+		switch v.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			p.Value = v.Int()
+		case reflect.Bool:
+			p.Value = v.Bool()
+		case reflect.String:
+			p.Value = v.String()
+		case reflect.Float32, reflect.Float64:
+			p.Value = v.Float()
+		case reflect.Slice:
+			if v.Type().Elem().Kind() == reflect.Uint8 {
+				p.Value = v.Bytes()
+			}
+		case reflect.Struct:
+			if !v.CanAddr() {
+				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+			}
+			sub, err := newStructPLS(v.Addr().Interface())
+			if err != nil {
+				return fmt.Errorf("datastore: unsupported struct field: %v", err)
+			}
+			return sub.(structPLS).save(props, name, noIndex, multiple)
+		}
+	}
+	if p.Value == nil {
+		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+	}
+	*props = append(*props, p)
+	return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+	var props []Property
+	if err := s.save(&props, "", false, false); err != nil {
+		return nil, err
+	}
+	return props, nil
+}
+
+func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {
+	for i, t := range s.codec.byIndex {
+		if t.name == "-" {
+			continue
+		}
+		name := t.name
+		if prefix != "" {
+			name = prefix + name
+		}
+		v := s.v.Field(i)
+		if !v.IsValid() || !v.CanSet() {
+			continue
+		}
+		noIndex1 := noIndex || t.noIndex
+		// For slice fields that aren't []byte, save each element.
+		if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+			for j := 0; j < v.Len(); j++ {
+				if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		// Otherwise, save the field itself.
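+		// (Added note: for a nested struct field this recurses through
+		// saveStructProperty, which emits flattened property names such as
+		// "Outer.Inner" via the prefix built by the struct codec.)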
+		if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) {
+	e := &pb.Entity{
+		Key: keyToProto(key),
+	}
+	indexedProps := 0
+	prevMultiple := make(map[string]*pb.Property)
+	for _, p := range props {
+		val, err := interfaceToProto(p.Value)
+		if err != "" {
+			return nil, fmt.Errorf("datastore: %s for a Property with Name %q", err, p.Name)
+		}
+		if !p.NoIndex {
+			rVal := reflect.ValueOf(p.Value)
+			if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 {
+				indexedProps += rVal.Len()
+			} else {
+				indexedProps++
+			}
+		}
+		if indexedProps > maxIndexedProperties {
+			return nil, errors.New("datastore: too many indexed properties")
+		}
+		switch v := p.Value.(type) {
+		case string:
+		case []byte:
+			if len(v) > 1500 && !p.NoIndex {
+				return nil, fmt.Errorf("datastore: cannot index a Property with Name %q", p.Name)
+			}
+		}
+		val.Indexed = proto.Bool(!p.NoIndex)
+		if p.Multiple {
+			x, ok := prevMultiple[p.Name]
+			if !ok {
+				x = &pb.Property{
+					Name:  proto.String(p.Name),
+					Value: &pb.Value{},
+				}
+				prevMultiple[p.Name] = x
+				e.Property = append(e.Property, x)
+			}
+			x.Value.ListValue = append(x.Value.ListValue, val)
+		} else {
+			e.Property = append(e.Property, &pb.Property{
+				Name:  proto.String(p.Name),
+				Value: val,
+			})
+		}
+	}
+	return e, nil
+}
+
+func interfaceToProto(iv interface{}) (p *pb.Value, errStr string) {
+	val := new(pb.Value)
+	switch v := iv.(type) {
+	case int:
+		val.IntegerValue = proto.Int64(int64(v))
+	case int32:
+		val.IntegerValue = proto.Int64(int64(v))
+	case int64:
+		val.IntegerValue = proto.Int64(v)
+	case bool:
+		val.BooleanValue = proto.Bool(v)
+	case string:
+		val.StringValue = proto.String(v)
+	case float32:
+		val.DoubleValue = proto.Float64(float64(v))
+	case float64:
+		val.DoubleValue = proto.Float64(v)
+	case *Key:
+		if v != nil {
+			val.KeyValue = keyToProto(v)
+		}
+	case time.Time:
+		if v.Before(minTime) || v.After(maxTime) {
+			return nil, "time value out of range"
+		}
+		val.TimestampMicrosecondsValue = proto.Int64(toUnixMicro(v))
+	case []byte:
+		val.BlobValue = v
+	default:
+		if iv != nil {
+			return nil, fmt.Sprintf("invalid Value type %T", iv)
+		}
+	}
+	// TODO(jbd): Support ListValue and EntityValue.
+	// TODO(jbd): Support types whose underlying type is one of the types above.
+ return val, "" +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/testdata/index.yaml b/Godeps/_workspace/src/google.golang.org/cloud/datastore/testdata/index.yaml new file mode 100644 index 000000000..47bc9de86 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/testdata/index.yaml @@ -0,0 +1,41 @@ +indexes: + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + direction: desc + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + - name: U \ No newline at end of file diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/time.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/time.go new file mode 100644 index 000000000..e7f6a1931 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/time.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "math" + "time" +) + +var ( + minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) + maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) +) + +func toUnixMicro(t time.Time) int64 { + // We cannot use t.UnixNano() / 1e3 because we want to handle times more than + // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot + // be represented in the numerator of a single int64 divide. + return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) +} + +func fromUnixMicro(t int64) time.Time { + return time.Unix(t/1e6, (t%1e6)*1e3) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/time_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/time_test.go new file mode 100644 index 000000000..5cc846c4c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/time_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + "time" +) + +func TestUnixMicro(t *testing.T) { + // Test that all these time.Time values survive a round trip to unix micros. 
+ testCases := []time.Time{ + {}, + time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), + time.Unix(-1e6, -1000), + time.Unix(-1e6, 0), + time.Unix(-1e6, +1000), + time.Unix(-60, -1000), + time.Unix(-60, 0), + time.Unix(-60, +1000), + time.Unix(-1, -1000), + time.Unix(-1, 0), + time.Unix(-1, +1000), + time.Unix(0, -3000), + time.Unix(0, -2000), + time.Unix(0, -1000), + time.Unix(0, 0), + time.Unix(0, +1000), + time.Unix(0, +2000), + time.Unix(+60, -1000), + time.Unix(+60, 0), + time.Unix(+60, +1000), + time.Unix(+1e6, -1000), + time.Unix(+1e6, 0), + time.Unix(+1e6, +1000), + time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), + time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), + time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), + } + for _, tc := range testCases { + got := fromUnixMicro(toUnixMicro(tc)) + if !got.Equal(tc) { + t.Errorf("got %q, want %q", got, tc) + } + } + + // Test that a time.Time that isn't an integral number of microseconds + // is not perfectly reconstructed after a round trip. + t0 := time.Unix(0, 123) + t1 := fromUnixMicro(toUnixMicro(t0)) + if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { + t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/transaction.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/transaction.go new file mode 100644 index 000000000..344c313e9 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/datastore/transaction.go @@ -0,0 +1,241 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + pb "google.golang.org/cloud/internal/datastore" + "google.golang.org/cloud/internal/transport" +) + +// ErrConcurrentTransaction is returned when a transaction is rolled back due +// to a conflict with a concurrent transaction. +var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") + +var errExpiredTransaction = errors.New("datastore: transaction expired") + +// A TransactionOption configures the Transaction returned by NewTransaction. +type TransactionOption interface { + apply(*pb.BeginTransactionRequest) +} + +type isolation struct { + level pb.BeginTransactionRequest_IsolationLevel +} + +func (i isolation) apply(req *pb.BeginTransactionRequest) { + req.IsolationLevel = i.level.Enum() +} + +var ( + // Snapshot causes the transaction to enforce a snapshot isolation level. 
+	Snapshot TransactionOption = isolation{pb.BeginTransactionRequest_SNAPSHOT}
+	// Serializable causes the transaction to enforce a serializable isolation level.
+	Serializable TransactionOption = isolation{pb.BeginTransactionRequest_SERIALIZABLE}
+)
+
+// Transaction represents a set of datastore operations to be committed atomically.
+//
+// Operations are enqueued by calling the Put and Delete methods on Transaction
+// (or their Multi-equivalents). These operations are only committed when the
+// Commit method is invoked. To ensure consistency, reads must be performed by
+// using Transaction's Get method or by using the Transaction method when
+// building a query.
+//
+// A Transaction must be committed or rolled back exactly once.
+type Transaction struct {
+	id       []byte
+	client   *Client
+	ctx      context.Context
+	mutation *pb.Mutation  // The mutations to apply.
+	pending  []*PendingKey // Incomplete keys pending transaction completion.
+}
+
+// NewTransaction starts a new transaction.
+func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) {
+	req, resp := &pb.BeginTransactionRequest{}, &pb.BeginTransactionResponse{}
+	for _, o := range opts {
+		o.apply(req)
+	}
+	if err := c.call(ctx, "beginTransaction", req, resp); err != nil {
+		return nil, err
+	}
+
+	return &Transaction{
+		id:       resp.Transaction,
+		ctx:      ctx,
+		client:   c,
+		mutation: &pb.Mutation{},
+	}, nil
+}
+
+// Commit applies the enqueued operations atomically.
+func (t *Transaction) Commit() (*Commit, error) {
+	if t.id == nil {
+		return nil, errExpiredTransaction
+	}
+	req := &pb.CommitRequest{
+		Transaction: t.id,
+		Mutation:    t.mutation,
+		Mode:        pb.CommitRequest_TRANSACTIONAL.Enum(),
+	}
+	t.id = nil
+	resp := &pb.CommitResponse{}
+	if err := t.client.call(t.ctx, "commit", req, resp); err != nil {
+		if e, ok := err.(*transport.ErrHTTP); ok && e.StatusCode == http.StatusConflict {
+			// TODO(jbd): Make sure that we explicitly handle the case where the
+			// response has an HTTP 409 and the error message indicates that it's
+			// a concurrent transaction error.
+			return nil, ErrConcurrentTransaction
+		}
+		return nil, err
+	}
+
+	// Copy any newly minted keys into the returned keys.
+	if len(t.pending) != len(resp.MutationResult.InsertAutoIdKey) {
+		return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
+	}
+	commit := &Commit{}
+	for i, p := range t.pending {
+		p.key = protoToKey(resp.MutationResult.InsertAutoIdKey[i])
+		p.commit = commit
+	}
+
+	return commit, nil
+}
+
+// Rollback abandons a pending transaction.
+func (t *Transaction) Rollback() error {
+	if t.id == nil {
+		return errExpiredTransaction
+	}
+	id := t.id
+	t.id = nil
+	return t.client.call(t.ctx, "rollback", &pb.RollbackRequest{Transaction: id}, &pb.RollbackResponse{})
+}
+
+// Get is the transaction-specific version of the package function Get.
+// All reads performed during the transaction will come from a single consistent
+// snapshot. Furthermore, if the transaction is set to a serializable isolation
+// level, another transaction cannot concurrently modify the data that is read
+// or modified by this transaction.
+func (t *Transaction) Get(key *Key, dst interface{}) error {
+	err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, &pb.ReadOptions{Transaction: t.id})
+	if me, ok := err.(MultiError); ok {
+		return me[0]
+	}
+	return err
+}
+
+// GetMulti is a batch version of Get.
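+//
+// For illustration only (added example; the Task type and keys slice are
+// hypothetical):
+//
+//	tasks := make([]Task, len(keys))
+//	err := tx.GetMulti(keys, tasks)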
+func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error { + if t.id == nil { + return errExpiredTransaction + } + return t.client.get(t.ctx, keys, dst, &pb.ReadOptions{Transaction: t.id}) +} + +// Put is the transaction-specific version of the package function Put. +// +// Put returns a PendingKey which can be resolved into a Key using the +// return value from a successful Commit. If key is an incomplete key, the +// returned pending key will resolve to a unique key generated by the +// datastore. +func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) { + h, err := t.PutMulti([]*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return h[0], nil +} + +// PutMulti is a batch version of Put. One PendingKey is returned for each +// element of src in the same order. +func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + mutation, err := putMutation(keys, src) + if err != nil { + return nil, err + } + proto.Merge(t.mutation, mutation) + + // Prepare the returned handles, pre-populating where possible. + ret := make([]*PendingKey, len(keys)) + for i, key := range keys { + h := &PendingKey{} + if key.Incomplete() { + // This key will be in the final commit result. + t.pending = append(t.pending, h) + } else { + h.key = key + } + + ret[i] = h + } + + return ret, nil +} + +// Delete is the transaction-specific version of the package function Delete. +// Delete enqueues the deletion of the entity for the given key, to be +// committed atomically upon calling Commit. +func (t *Transaction) Delete(key *Key) error { + err := t.DeleteMulti([]*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func (t *Transaction) DeleteMulti(keys []*Key) error { + if t.id == nil { + return errExpiredTransaction + } + mutation, err := deleteMutation(keys) + if err != nil { + return err + } + proto.Merge(t.mutation, mutation) + return nil +} + +// Commit represents the result of a committed transaction. +type Commit struct{} + +// Key resolves a pending key handle into a final key. +func (c *Commit) Key(p *PendingKey) *Key { + if c != p.commit { + panic("PendingKey was not created by corresponding transaction") + } + return p.key +} + +// PendingKey represents the key for newly-inserted entity. It can be +// resolved into a Key by calling the Key method of Commit. +type PendingKey struct { + key *Key + commit *Commit +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/concat_table/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/concat_table/main.go new file mode 100644 index 000000000..978801b04 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/concat_table/main.go @@ -0,0 +1,111 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// concat_table is an example client of the bigquery client library. +// It concatenates two BigQuery tables and writes the result to another table. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/cloud/bigquery" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + src1 = flag.String("src1", "", "The ID of the first BigQuery table to concatenate") + src2 = flag.String("src2", "", "The ID of the second BigQuery table to concatenate") + dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "src1", "src2", "dest"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + if *src1 == *src2 || *src1 == *dest || *src2 == *dest { + log.Fatalf("Different values must be supplied for each of --src1, --src2 and --dest") + } + + httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) + if err != nil { + log.Fatalf("Creating http client: %v", err) + } + + client, err := bigquery.NewClient(httpClient, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + s1 := &bigquery.Table{ + ProjectID: *project, + DatasetID: *dataset, + TableID: *src1, + } + + s2 := &bigquery.Table{ + ProjectID: *project, + DatasetID: *dataset, + TableID: *src2, + } + + d := &bigquery.Table{ + ProjectID: *project, + DatasetID: *dataset, + TableID: *dest, + } + + // Concatenate data. + job, err := client.Copy(context.Background(), d, bigquery.Tables{s1, s2}, bigquery.WriteTruncate) + + if err != nil { + log.Fatalf("Concatenating: %v", err) + } + + fmt.Printf("Job for concatenation operation: %+v\n", job) + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(context.Background()) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/load/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/load/main.go new file mode 100644 index 000000000..30ed9dbba --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/load/main.go @@ -0,0 +1,104 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
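+
+// Note: concat_table above (and the examples below) wait for job completion
+// by polling the job's status in a loop. A minimal sketch of that shared
+// pattern as a reusable helper (waitForJob is illustrative, not part of the
+// bigquery package):
+//
+//	func waitForJob(ctx context.Context, job *bigquery.Job, interval time.Duration) error {
+//		for range time.Tick(interval) {
+//			status, err := job.Status(ctx)
+//			if err != nil {
+//				return err
+//			}
+//			if status.Done() {
+//				return status.Err()
+//			}
+//		}
+//		return nil
+//	}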
+ +// load is an example client of the bigquery client library. +// It loads a file from Google Cloud Storage into a BigQuery table. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/cloud/bigquery" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + table = flag.String("table", "", "The ID of a BigQuery table to load data into") + bucket = flag.String("bucket", "", "The name of a Google Cloud Storage bucket to load data from") + object = flag.String("object", "", "The name of a Google Cloud Storage object to load data from. Must exist within the bucket specified by --bucket") + skiprows = flag.Int64("skiprows", 0, "The number of rows of the source data to skip when loading") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "table", "bucket", "object"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + + httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) + if err != nil { + log.Fatalf("Creating http client: %v", err) + } + + client, err := bigquery.NewClient(httpClient, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + table := &bigquery.Table{ + ProjectID: *project, + DatasetID: *dataset, + TableID: *table, + } + + gcs := client.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object)) + gcs.SkipLeadingRows = *skiprows + + // Load data from Google Cloud Storage into a BigQuery table. + job, err := client.Copy( + context.Background(), table, gcs, + bigquery.MaxBadRecords(1), + bigquery.AllowQuotedNewlines(), + bigquery.WriteTruncate) + + if err != nil { + log.Fatalf("Loading data: %v", err) + } + + fmt.Printf("Job for data load operation: %+v\n", job) + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(context.Background()) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/query/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/query/main.go new file mode 100644 index 000000000..d6dc0b6a1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/query/main.go @@ -0,0 +1,108 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// query is an example client of the bigquery client library. 
+// It submits a query and writes the result to a table. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/cloud/bigquery" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + q = flag.String("q", "", "The query string") + dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to. If unset, an ephemeral table ID will be generated.") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") + wait = flag.Bool("wait", false, "Whether to wait for the query job to complete.") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "q"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + + httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) + if err != nil { + log.Fatalf("Creating http client: %v", err) + } + + client, err := bigquery.NewClient(httpClient, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + d := &bigquery.Table{} + + if *dest != "" { + d.ProjectID = *project + d.DatasetID = *dataset + d.TableID = *dest + } + + query := &bigquery.Query{ + Q: *q, + DefaultProjectID: *project, + DefaultDatasetID: *dataset, + } + + // Query data. + job, err := client.Copy(context.Background(), d, query, bigquery.WriteTruncate) + + if err != nil { + log.Fatalf("Querying: %v", err) + } + + fmt.Printf("Submitted query. Job ID: %s\n", job.ID()) + if !*wait { + return + } + + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(context.Background()) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/read/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/read/main.go new file mode 100644 index 000000000..181bd7c78 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/read/main.go @@ -0,0 +1,148 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// read is an example client of the bigquery client library. +// It reads from a table, returning the data via an Iterator. 
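+// Rows are printed either for every table in --dataset whose ID matches the
+// --table regular expression, or for a previously submitted query job named
+// by --jobid.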
+package main + +import ( + "flag" + "fmt" + "log" + "os" + "regexp" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/cloud/bigquery" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + table = flag.String("table", ".*", "A regular expression to match the IDs of tables to read.") + jobID = flag.String("jobid", "", "The ID of a query job that has already been submitted."+ + " If set, --dataset, --table will be ignored, and results will be read from the specified job.") +) + +func printValues(it *bigquery.Iterator) { + // one-space padding. + tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + + for it.Next(context.Background()) { + var vals bigquery.ValueList + if err := it.Get(&vals); err != nil { + fmt.Printf("err calling get: %v\n", err) + } else { + sep := "" + for _, v := range vals { + fmt.Fprintf(tw, "%s%v", sep, v) + sep = "\t" + } + fmt.Fprintf(tw, "\n") + } + } + tw.Flush() + + fmt.Printf("\n") + if err := it.Err(); err != nil { + fmt.Printf("err reading: %v\n", err) + } +} + +func printTable(client *bigquery.Client, t *bigquery.Table) { + it, err := client.Read(context.Background(), t) + if err != nil { + log.Fatalf("Reading: %v", err) + } + + id := t.FullyQualifiedName() + fmt.Printf("%s\n%s\n", id, strings.Repeat("-", len(id))) + printValues(it) +} + +func printQueryResults(client *bigquery.Client, queryJobID string) { + job, err := client.JobFromID(context.Background(), queryJobID) + if err != nil { + log.Fatalf("Loading job: %v", err) + } + + it, err := client.Read(context.Background(), job) + if err != nil { + log.Fatalf("Reading: %v", err) + } + + // TODO: print schema. 
+ printValues(it) +} + +func main() { + flag.Parse() + + flagsOk := true + if flag.Lookup("project").Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --project is required\n") + flagsOk = false + } + + var sourceFlagCount int + if flag.Lookup("dataset").Value.String() != "" { + sourceFlagCount++ + } + if flag.Lookup("jobid").Value.String() != "" { + sourceFlagCount++ + } + if sourceFlagCount != 1 { + fmt.Fprintf(os.Stderr, "Exactly one of --dataset or --jobid must be set\n") + flagsOk = false + } + + if !flagsOk { + os.Exit(1) + } + + tableRE, err := regexp.Compile(*table) + if err != nil { + fmt.Fprintf(os.Stderr, "--table is not a valid regular expression: %q\n", *table) + os.Exit(1) + } + + httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) + if err != nil { + log.Fatalf("Creating http client: %v", err) + } + + client, err := bigquery.NewClient(httpClient, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + if *jobID != "" { + printQueryResults(client, *jobID) + return + } + ds := client.Dataset(*dataset) + var tables []*bigquery.Table + tables, err = ds.ListTables(context.Background()) + if err != nil { + log.Fatalf("Listing tables: %v", err) + } + for _, t := range tables { + if tableRE.MatchString(t.TableID) { + printTable(client, t) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/README.md b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/README.md new file mode 100644 index 000000000..3f3e92155 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/README.md @@ -0,0 +1,30 @@ +# Cloud Bigtable on Managed VMs using Go +# (Hello World for Cloud Bigtable) + +This app counts how often each user visits. + +## Prerequisites + +1. Set up Cloud Console. + 1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project. + You will need the project ID later. + 1. Go to **Settings > Project Billing Settings** and enable billing. + 1. Select **APIs & Auth > APIs**. + 1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**. + (You may need to search for the API). +1. Set up gcloud. + 1. `gcloud components update` + 1. `gcloud auth login` + 1. `gcloud config set project PROJECT_ID` +1. Download App Engine SDK for Go. + 1. `go get -u google.golang.org/appengine/...` +1. In helloworld.go, change the constants `project`, `zone` and `cluster` + +## Running locally + +1. From the sample project folder, `gcloud preview app run app.yaml` + +## Deploying on Google App Engine Managed VM + +1. Install and start [Docker](https://cloud.google.com/appengine/docs/managed-vms/getting-started#install_docker). +1. From the sample project folder, `aedeploy gcloud preview app deploy app.yaml` diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/app.yaml b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/app.yaml new file mode 100644 index 000000000..4f5fef0e5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/app.yaml @@ -0,0 +1,11 @@ +runtime: go +api_version: go1 +vm: true + +manual_scaling: + instances: 1 + +handlers: +# Serve only the web root. 
+- url: / + script: _go_app diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/helloworld.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/helloworld.go new file mode 100644 index 000000000..8f1bd702e --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/helloworld.go @@ -0,0 +1,169 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +helloworld tracks how often a user has visited the index page. + +This program demonstrates usage of the Cloud Bigtable API for Managed VMs and Go. +Instructions for running this program are in the README.md. +*/ +package main + +import ( + "bytes" + "encoding/binary" + "html/template" + "log" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/appengine" + aelog "google.golang.org/appengine/log" + "google.golang.org/appengine/user" + "google.golang.org/cloud/bigtable" +) + +// User-provided constants. +const ( + project = "PROJECT_ID" + zone = "CLUSTER_ZONE" + cluster = "CLUSTER_NAME" +) + +var ( + tableName = "bigtable-hello" + familyName = "emails" + + // Client is initialized by main. + client *bigtable.Client +) + +func main() { + ctx := context.Background() + + // Set up admin client, tables, and column families. + // NewAdminClient uses Application Default Credentials to authenticate. + adminClient, err := bigtable.NewAdminClient(ctx, project, zone, cluster) + if err != nil { + log.Fatalf("Unable to create a table admin client. %v", err) + } + tables, err := adminClient.Tables(ctx) + if err != nil { + log.Fatalf("Unable to fetch table list. %v", err) + } + if !sliceContains(tables, tableName) { + if err := adminClient.CreateTable(ctx, tableName); err != nil { + log.Fatalf("Unable to create table: %v. %v", tableName, err) + } + } + tblInfo, err := adminClient.TableInfo(ctx, tableName) + if err != nil { + log.Fatalf("Unable to read info for table: %v. %v", tableName, err) + } + if !sliceContains(tblInfo.Families, familyName) { + if err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil { + log.Fatalf("Unable to create column family: %v. %v", familyName, err) + } + } + adminClient.Close() + + // Set up Bigtable data operations client. + // NewClient uses Application Default Credentials to authenticate. + client, err = bigtable.NewClient(ctx, project, zone, cluster) + if err != nil { + log.Fatalf("Unable to create data operations client. %v", err) + } + + http.Handle("/", appHandler(mainHandler)) + appengine.Main() // Never returns. +} + +// mainHandler tracks how many times each user has visited this page. +func mainHandler(w http.ResponseWriter, r *http.Request) *appError { + if r.URL.Path != "/" { + http.NotFound(w, r) + return nil + } + + ctx := appengine.NewContext(r) + u := user.Current(ctx) + if u == nil { + login, err := user.LoginURL(ctx, r.URL.String()) + if err != nil { + return &appError{err, "Error finding login URL", http.StatusInternalServerError} + } + http.Redirect(w, r, login, http.StatusFound) + return nil + } + logoutURL, err := user.LogoutURL(ctx, "/") + if err != nil { + return &appError{err, "Error finding logout URL", http.StatusInternalServerError} + } + + // Display hello page. 
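+	// The per-user visit count is kept in a single column whose qualifier is
+	// the user's email address; ReadModifyWrite increments it atomically on
+	// the server.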
+ tbl := client.Open(tableName) + rmw := bigtable.NewReadModifyWrite() + rmw.Increment(familyName, u.Email, 1) + row, err := tbl.ApplyReadModifyWrite(ctx, u.Email, rmw) + if err != nil { + return &appError{err, "Error applying ReadModifyWrite to row: " + u.Email, http.StatusInternalServerError} + } + data := struct { + Username, Logout string + Visits uint64 + }{ + Username: u.Email, + // Retrieve the most recently edited column. + Visits: binary.BigEndian.Uint64(row[familyName][0].Value), + Logout: logoutURL, + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return &appError{err, "Error writing template", http.StatusInternalServerError} + } + buf.WriteTo(w) + return nil +} + +var tmpl = template.Must(template.New("").Parse(` + + +

+{{with .Username}} Hello {{.}}{{end}}
+{{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
+
+

+ +

+You have visited {{.Visits}} +

+ +`)) + +// sliceContains reports whether the provided string is present in the given slice of strings. +func sliceContains(list []string, target string) bool { + for _, s := range list { + if s == target { + return true + } + } + return false +} + +// More info about this method of error handling can be found at: http://blog.golang.org/error-handling-and-go +type appHandler func(http.ResponseWriter, *http.Request) *appError + +type appError struct { + Error error + Message string + Code int +} + +func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if e := fn(w, r); e != nil { + ctx := appengine.NewContext(r) + aelog.Errorf(ctx, "%v", e.Error) + http.Error(w, e.Message, e.Code) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/search/search.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/search/search.go new file mode 100644 index 000000000..c9b1339d5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/search/search.go @@ -0,0 +1,439 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is a sample web server that uses Cloud Bigtable as the storage layer +// for a simple document-storage and full-text-search service. +// It has three functions: +// - Add a document. This adds the content of a user-supplied document to the +// Bigtable, and adds references to the document to an index in the Bigtable. +// The document is indexed under each unique word in the document. +// - Search the index. This returns documents containing each word in a user +// query, with snippets and links to view the whole document. +// - Clear the table. This deletes and recreates the Bigtable, +package main + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "io" + "log" + "net/http" + "os" + "strings" + "sync" + "time" + "unicode" + + "golang.org/x/net/context" + "google.golang.org/cloud/bigtable" +) + +var ( + project = flag.String("project", "", "The name of the project.") + zone = flag.String("zone", "", "The zone of the project.") + cluster = flag.String("cluster", "", "The name of the Cloud Bigtable cluster.") + tableName = flag.String("table", "docindex", "The name of the table containing the documents and index.") + credFile = flag.String("creds", "", "File containing credentials") + rebuild = flag.Bool("rebuild", false, "Rebuild the table from scratch on startup.") + + client *bigtable.Client + adminClient *bigtable.AdminClient + table *bigtable.Table + + addTemplate = template.Must(template.New("").Parse(` +Added {{.Title}} +`)) + + contentTemplate = template.Must(template.New("").Parse(` +{{.Title}}

+{{.Content}} +`)) + + searchTemplate = template.Must(template.New("").Parse(` +Results for {{.Query}}:

+{{range .Results}}
+<a href="/content?name={{.Title}}">{{.Title}}</a><br>
+{{.Snippet}}<br><br>

+{{end}} +`)) +) + +const ( + // prototypeTableName is an existing table containing some documents. + // Rebuilding a table will populate it with the data from this table. + prototypeTableName = "shakespearetemplate" + indexColumnFamily = "i" + contentColumnFamily = "c" + mainPage = ` + + + Document Search + + + Search for documents: +
+
+
+ + + Add a document: +
+ Document name: +
+ Document text: +
+
+ + + Rebuild table: +
+
+ + + + ` +) + +func main() { + flag.Parse() + + if *tableName == prototypeTableName { + log.Fatal("Can't use " + prototypeTableName + " as your table.") + } + + // Let the library get credentials from file. + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", *credFile) + + // Make an admin client. + var err error + if adminClient, err = bigtable.NewAdminClient(context.Background(), *project, *zone, *cluster); err != nil { + log.Fatal("Bigtable NewAdminClient:", err) + } + + // Make a regular client. + client, err = bigtable.NewClient(context.Background(), *project, *zone, *cluster) + if err != nil { + log.Fatal("Bigtable NewClient:", err) + } + + // Open the table. + table = client.Open(*tableName) + + // Rebuild the table if the command-line flag is set. + if *rebuild { + if err := rebuildTable(); err != nil { + log.Fatal(err) + } + } + + // Set up HTML handlers, and start the web server. + http.HandleFunc("/search", handleSearch) + http.HandleFunc("/content", handleContent) + http.HandleFunc("/add", handleAddDoc) + http.HandleFunc("/clearindex", handleClear) + http.HandleFunc("/", handleMain) + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +// handleMain outputs the home page, containing a search box, an "add document" box, and "clear table" button. +func handleMain(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, mainPage) +} + +// tokenize splits a string into tokens. +// This is very simple, it's not a good tokenization function. +func tokenize(s string) []string { + wordMap := make(map[string]bool) + f := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) }) + for _, word := range f { + word = strings.ToLower(word) + wordMap[word] = true + } + words := make([]string, 0, len(wordMap)) + for word := range wordMap { + words = append(words, word) + } + return words +} + +// handleContent fetches the content of a document from the Bigtable and returns it. +func handleContent(w http.ResponseWriter, r *http.Request) { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + name := r.FormValue("name") + if len(name) == 0 { + http.Error(w, "No document name supplied.", http.StatusBadRequest) + return + } + + row, err := table.ReadRow(ctx, name) + if err != nil { + http.Error(w, "Error reading content: "+err.Error(), http.StatusInternalServerError) + return + } + content := row[contentColumnFamily] + if len(content) == 0 { + http.Error(w, "Document not found.", http.StatusNotFound) + return + } + var buf bytes.Buffer + if err := contentTemplate.ExecuteTemplate(&buf, "", struct{ Title, Content string }{name, string(content[0].Value)}); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// handleSearch responds to search queries, returning links and snippets for matching documents. +func handleSearch(w http.ResponseWriter, r *http.Request) { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + query := r.FormValue("q") + // Split the query into words. + words := tokenize(query) + if len(words) == 0 { + http.Error(w, "Empty query.", http.StatusBadRequest) + return + } + + // readRows reads from many rows concurrently. 
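+	// It fans out one goroutine per row key and reports failure if any
+	// single read failed.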
+ readRows := func(rows []string) ([]bigtable.Row, error) { + results := make([]bigtable.Row, len(rows)) + errors := make([]error, len(rows)) + var wg sync.WaitGroup + for i, row := range rows { + wg.Add(1) + go func(i int, row string) { + defer wg.Done() + results[i], errors[i] = table.ReadRow(ctx, row) + }(i, row) + } + wg.Wait() + for _, err := range errors { + if err != nil { + return nil, err + } + } + return results, nil + } + + // For each query word, get the list of documents containing it. + results, err := readRows(words) + if err != nil { + http.Error(w, "Error reading index: "+err.Error(), http.StatusInternalServerError) + return + } + + // Count how many of the query words each result contained. + hits := make(map[string]int) + for _, r := range results { + for _, r := range r[indexColumnFamily] { + hits[r.Column]++ + } + } + + // Build a slice of all the documents that matched every query word. + var matches []string + for doc, count := range hits { + if count == len(words) { + matches = append(matches, doc[len(indexColumnFamily+":"):]) + } + } + + // Fetch the content of those documents from the Bigtable. + content, err := readRows(matches) + if err != nil { + http.Error(w, "Error reading results: "+err.Error(), http.StatusInternalServerError) + return + } + + type result struct{ Title, Snippet string } + data := struct { + Query string + Results []result + }{query, nil} + + // Output links and snippets. + for i, doc := range matches { + var text string + c := content[i][contentColumnFamily] + if len(c) > 0 { + text = string(c[0].Value) + } + if len(text) > 100 { + text = text[:100] + "..." + } + data.Results = append(data.Results, result{doc, text}) + } + var buf bytes.Buffer + if err := searchTemplate.ExecuteTemplate(&buf, "", data); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// handleAddDoc adds a document to the index. +func handleAddDoc(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + + ctx, _ := context.WithTimeout(context.Background(), time.Minute) + + name := r.FormValue("name") + if len(name) == 0 { + http.Error(w, "Empty document name!", http.StatusBadRequest) + return + } + + content := r.FormValue("content") + if len(content) == 0 { + http.Error(w, "Empty document content!", http.StatusBadRequest) + return + } + + var ( + writeErr error // Set if any write fails. + mu sync.Mutex // Protects writeErr + wg sync.WaitGroup // Used to wait for all writes to finish. + ) + + // writeOneColumn writes one column in one row, updates err if there is an error, + // and signals wg that one operation has finished. + writeOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) { + mut := bigtable.NewMutation() + mut.Set(family, column, ts, []byte(value)) + err := table.Apply(ctx, row, mut) + if err != nil { + mu.Lock() + writeErr = err + mu.Unlock() + } + } + + // Start a write to store the document content. + wg.Add(1) + go func() { + writeOneColumn(name, contentColumnFamily, "", content, bigtable.Now()) + wg.Done() + }() + + // Start writes to store the document name in the index for each word in the document. 
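+	// Each word becomes a row key and the document name becomes the column
+	// qualifier, so a later ReadRow on a word yields every document that
+	// contains it.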
+ words := tokenize(content) + for _, word := range words { + var ( + row = word + family = indexColumnFamily + column = name + value = "" + ts = bigtable.Now() + ) + wg.Add(1) + go func() { + // TODO: should use a semaphore to limit the number of concurrent writes. + writeOneColumn(row, family, column, value, ts) + wg.Done() + }() + } + wg.Wait() + if writeErr != nil { + http.Error(w, "Error writing to Bigtable: "+writeErr.Error(), http.StatusInternalServerError) + return + } + var buf bytes.Buffer + if err := addTemplate.ExecuteTemplate(&buf, "", struct{ Title string }{name}); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// rebuildTable deletes the table if it exists, then creates the table, with the index column family. +func rebuildTable() error { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + adminClient.DeleteTable(ctx, *tableName) + if err := adminClient.CreateTable(ctx, *tableName); err != nil { + return fmt.Errorf("CreateTable: %v", err) + } + time.Sleep(20 * time.Second) + if err := adminClient.CreateColumnFamily(ctx, *tableName, indexColumnFamily); err != nil { + return fmt.Errorf("CreateColumnFamily: %v", err) + } + if err := adminClient.CreateColumnFamily(ctx, *tableName, contentColumnFamily); err != nil { + return fmt.Errorf("CreateColumnFamily: %v", err) + } + + // Open the prototype table. It contains a number of documents to get started with. + prototypeTable := client.Open(prototypeTableName) + + var ( + writeErr error // Set if any write fails. + mu sync.Mutex // Protects writeErr + wg sync.WaitGroup // Used to wait for all writes to finish. + ) + copyRowToTable := func(row bigtable.Row) bool { + mu.Lock() + failed := writeErr != nil + mu.Unlock() + if failed { + return false + } + mut := bigtable.NewMutation() + for family, items := range row { + for _, item := range items { + // Get the column name, excluding the column family name and ':' character. + columnWithoutFamily := item.Column[len(family)+1:] + mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value) + } + } + wg.Add(1) + go func() { + // TODO: should use a semaphore to limit the number of concurrent writes. + if err := table.Apply(ctx, row.Key(), mut); err != nil { + mu.Lock() + writeErr = err + mu.Unlock() + } + wg.Done() + }() + return true + } + + // Create a filter that only accepts the column families we're interested in. + filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily) + // Read every row from prototypeTable, and call copyRowToTable to copy it to our table. 
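+	// copyRowToTable returns false once any write has failed, which stops
+	// the scan early.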
+ err := prototypeTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter)) + wg.Wait() + if err != nil { + return err + } + return writeErr +} + +// handleClear calls rebuildTable +func handleClear(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + if err := rebuildTable(); err != nil { + http.Error(w, "Failed to rebuild index: "+err.Error(), http.StatusInternalServerError) + return + } + fmt.Fprint(w, "Rebuilt index.\n") +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/pubsub/cmdline/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/pubsub/cmdline/main.go new file mode 100644 index 000000000..c51614d1c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/pubsub/cmdline/main.go @@ -0,0 +1,354 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package main contains a simple command line tool for Cloud Pub/Sub +// Cloud Pub/Sub docs: https://cloud.google.com/pubsub/docs +package main + +import ( + "errors" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "strconv" + "time" + + "golang.org/x/net/context" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/cloud" + "google.golang.org/cloud/compute/metadata" + "google.golang.org/cloud/pubsub" +) + +var ( + jsonFile = flag.String("j", "", "A path to your JSON key file for your service account downloaded from Google Developer Console, not needed if you run it on Compute Engine instances.") + projID = flag.String("p", "", "The ID of your Google Cloud project.") + reportMPS = flag.Bool("report", false, "Reports the incoming/outgoing message rate in msg/sec if set.") + size = flag.Int("size", 10, "Batch size for pull_messages and publish_messages subcommands.") +) + +const ( + usage = `Available arguments are: + create_topic + delete_topic + create_subscription + delete_subscription + publish + pull_messages + publish_messages +` + tick = 1 * time.Second +) + +func usageAndExit(msg string) { + fmt.Fprintln(os.Stderr, msg) + fmt.Println("Flags:") + flag.PrintDefaults() + fmt.Fprint(os.Stderr, usage) + os.Exit(2) +} + +// Check the length of the arguments. +func checkArgs(argv []string, min int) { + if len(argv) < min { + usageAndExit("Missing arguments") + } +} + +// newClient creates http.Client with a jwt service account when +// jsonFile flag is specified, otherwise by obtaining the GCE service +// account's access token. 
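+// On GCE it also fills in the project ID from the instance metadata service
+// when the -p flag was not supplied.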
+func newClient(jsonFile string) (*http.Client, error) { + if jsonFile != "" { + jsonKey, err := ioutil.ReadFile(jsonFile) + if err != nil { + return nil, err + } + conf, err := google.JWTConfigFromJSON(jsonKey, pubsub.ScopePubSub) + if err != nil { + return nil, err + } + return conf.Client(oauth2.NoContext), nil + } + if metadata.OnGCE() { + c := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.ComputeTokenSource(""), + }, + } + if *projID == "" { + projectID, err := metadata.ProjectID() + if err != nil { + return nil, fmt.Errorf("ProjectID failed, %v", err) + } + *projID = projectID + } + return c, nil + } + return nil, errors.New("Could not create an authenticated client.") +} + +func listTopics(ctx context.Context, argv []string) { + panic("listTopics not implemented yet") +} + +func createTopic(ctx context.Context, argv []string) { + checkArgs(argv, 2) + topic := argv[1] + err := pubsub.CreateTopic(ctx, topic) + if err != nil { + log.Fatalf("CreateTopic failed, %v", err) + } + fmt.Printf("Topic %s was created.\n", topic) +} + +func deleteTopic(ctx context.Context, argv []string) { + checkArgs(argv, 2) + topic := argv[1] + err := pubsub.DeleteTopic(ctx, topic) + if err != nil { + log.Fatalf("DeleteTopic failed, %v", err) + } + fmt.Printf("Topic %s was deleted.\n", topic) +} + +func listSubscriptions(ctx context.Context, argv []string) { + panic("listSubscriptions not implemented yet") +} + +func createSubscription(ctx context.Context, argv []string) { + checkArgs(argv, 3) + sub := argv[1] + topic := argv[2] + err := pubsub.CreateSub(ctx, sub, topic, 60*time.Second, "") + if err != nil { + log.Fatalf("CreateSub failed, %v", err) + } + fmt.Printf("Subscription %s was created.\n", sub) +} + +func deleteSubscription(ctx context.Context, argv []string) { + checkArgs(argv, 2) + sub := argv[1] + err := pubsub.DeleteSub(ctx, sub) + if err != nil { + log.Fatalf("DeleteSub failed, %v", err) + } + fmt.Printf("Subscription %s was deleted.\n", sub) +} + +func publish(ctx context.Context, argv []string) { + checkArgs(argv, 3) + topic := argv[1] + message := argv[2] + msgIDs, err := pubsub.Publish(ctx, topic, &pubsub.Message{ + Data: []byte(message), + }) + if err != nil { + log.Fatalf("Publish failed, %v", err) + } + fmt.Printf("Message '%s' published to a topic %s and the message id is %s\n", message, topic, msgIDs[0]) +} + +type reporter struct { + reportTitle string + lastC uint64 + c uint64 + result <-chan int +} + +func (r *reporter) report() { + ticker := time.NewTicker(tick) + defer func() { + ticker.Stop() + }() + for { + select { + case <-ticker.C: + n := r.c - r.lastC + r.lastC = r.c + mps := n / uint64(tick/time.Second) + log.Printf("%s ~%d msgs/s, total: %d", r.reportTitle, mps, r.c) + case n := <-r.result: + r.c += uint64(n) + } + } +} + +func ack(ctx context.Context, sub string, ackID ...string) { + err := pubsub.Ack(ctx, sub, ackID...) + if err != nil { + log.Printf("Ack failed, %v\n", err) + } +} + +func pullLoop(ctx context.Context, sub string, result chan<- int) { + for { + msgs, err := pubsub.PullWait(ctx, sub, *size) + if err != nil { + log.Printf("PullWait failed, %v\n", err) + time.Sleep(5 * time.Second) + continue + } + if len(msgs) == 0 { + log.Println("Received no messages") + continue + } + if *reportMPS { + result <- len(msgs) + } + ackIDs := make([]string, len(msgs)) + for i, msg := range msgs { + if !*reportMPS { + fmt.Printf("Got a message: %s\n", msg.Data) + } + ackIDs[i] = msg.AckID + } + go ack(ctx, sub, ackIDs...) 
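+		// Acknowledgements are sent on their own goroutine (above) so the
+		// loop can issue the next PullWait immediately.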
+ } +} + +func pullMessages(ctx context.Context, argv []string) { + checkArgs(argv, 3) + sub := argv[1] + workers, err := strconv.Atoi(argv[2]) + if err != nil { + log.Fatalf("Atoi failed, %v", err) + } + result := make(chan int, 1024) + for i := 0; i < int(workers); i++ { + go pullLoop(ctx, sub, result) + } + if *reportMPS { + r := reporter{reportTitle: "Received", result: result} + r.report() + } else { + select {} + } +} + +func publishLoop(ctx context.Context, topic string, workerid int, result chan<- int) { + var r uint64 + for { + msgs := make([]*pubsub.Message, *size) + for i := 0; i < *size; i++ { + msgs[i] = &pubsub.Message{ + Data: []byte(fmt.Sprintf("Worker: %d, Round: %d, Message: %d", workerid, r, i)), + } + } + _, err := pubsub.Publish(ctx, topic, msgs...) + if err != nil { + log.Printf("Publish failed, %v\n", err) + return + } + r++ + if *reportMPS { + result <- *size + } + } +} + +func publishMessages(ctx context.Context, argv []string) { + checkArgs(argv, 3) + topic := argv[1] + workers, err := strconv.Atoi(argv[2]) + if err != nil { + log.Fatalf("Atoi failed, %v", err) + } + result := make(chan int, 1024) + for i := 0; i < int(workers); i++ { + go publishLoop(ctx, topic, i, result) + } + if *reportMPS { + r := reporter{reportTitle: "Sent", result: result} + r.report() + } else { + select {} + } +} + +// This example demonstrates calling the Cloud Pub/Sub API. As of 22 +// Oct 2014, the Cloud Pub/Sub API is only available if you're +// whitelisted. If you're interested in using it, please apply for the +// Limited Preview program at the following form: +// http://goo.gl/Wql9HL +// +// Also, before running this example, be sure to enable Cloud Pub/Sub +// service on your project in Developer Console at: +// https://console.developers.google.com/ +// +// Unless you run this sample on Compute Engine instance, please +// create a new service account and download a JSON key file for it at +// the developer console: https://console.developers.google.com/ +// +// It has the following subcommands: +// +// create_topic +// delete_topic +// create_subscription +// delete_subscription +// publish +// pull_messages +// publish_messages +// +// You can choose any names for topic and subscription as long as they +// follow the naming rule described at: +// https://cloud.google.com/pubsub/overview#names +// +// You can create/delete topics/subscriptions by self-explanatory +// subcommands. +// +// The "publish" subcommand is for publishing a single message to a +// specified Cloud Pub/Sub topic. +// +// The "pull_messages" subcommand is for continuously pulling messages +// from a specified Cloud Pub/Sub subscription with specified number +// of workers. +// +// The "publish_messages" subcommand is for continuously publishing +// messages to a specified Cloud Pub/Sub topic with specified number +// of workers. 
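+//
+// For example, assuming a service account key in key.json and a project
+// called my-project (the binary name here is illustrative):
+//
+//	cmdline -j key.json -p my-project create_topic mytopic
+//	cmdline -j key.json -p my-project create_subscription mysub mytopic
+//	cmdline -j key.json -p my-project publish mytopic "hello"
+//	cmdline -j key.json -p my-project pull_messages mysub 2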
+func main() { + flag.Parse() + argv := flag.Args() + checkArgs(argv, 1) + client, err := newClient(*jsonFile) + if err != nil { + log.Fatalf("clientAndId failed, %v", err) + } + if *projID == "" { + usageAndExit("Please specify Project ID.") + } + ctx := cloud.NewContext(*projID, client) + m := map[string]func(ctx context.Context, argv []string){ + "create_topic": createTopic, + "delete_topic": deleteTopic, + "create_subscription": createSubscription, + "delete_subscription": deleteSubscription, + "publish": publish, + "pull_messages": pullMessages, + "publish_messages": publishMessages, + } + subcommand := argv[0] + f, ok := m[subcommand] + if !ok { + usageAndExit(fmt.Sprintf("Function not found for %s", subcommand)) + } + f(ctx, argv) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.go new file mode 100644 index 000000000..0889f1256 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.go @@ -0,0 +1,401 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gcsdemo is an example App Engine app using the Google Cloud Storage API. +package gcsdemo + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/appengine" + "google.golang.org/appengine/file" + "google.golang.org/appengine/log" + "google.golang.org/appengine/urlfetch" + "google.golang.org/cloud" + "google.golang.org/cloud/storage" +) + +// bucket is a local cache of the app's default bucket name. +var bucket string // or: var bucket = ".appspot.com" + +func init() { + http.HandleFunc("/", handler) +} + +// demo struct holds information needed to run the various demo functions. +type demo struct { + c context.Context + w http.ResponseWriter + ctx context.Context + // cleanUp is a list of filenames that need cleaning up at the end of the demo. + cleanUp []string + // failed indicates that one or more of the demo steps failed. + failed bool +} + +func (d *demo) errorf(format string, args ...interface{}) { + d.failed = true + log.Errorf(d.c, format, args...) +} + +// handler is the main demo entry point that calls the GCS operations. +func handler(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + c := appengine.NewContext(r) + if bucket == "" { + var err error + if bucket, err = file.DefaultBucketName(c); err != nil { + log.Errorf(c, "failed to get default GCS bucket name: %v", err) + return + } + } + hc := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(c, storage.ScopeFullControl), + // Note that the App Engine urlfetch service has a limit of 10MB uploads and + // 32MB downloads. 
+ // See https://cloud.google.com/appengine/docs/go/urlfetch/#Go_Quotas_and_limits + // for more information. + Base: &urlfetch.Transport{Context: c}, + }, + } + ctx := cloud.NewContext(appengine.AppID(c), hc) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(c)) + fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) + + d := &demo{ + c: c, + w: w, + ctx: ctx, + } + + n := "demo-testfile-go" + d.createFile(n) + d.readFile(n) + d.copyFile(n) + d.statFile(n) + d.createListFiles() + d.listBucket() + d.listBucketDirMode() + d.defaultACL() + d.putDefaultACLRule() + d.deleteDefaultACLRule() + d.bucketACL() + d.putBucketACLRule() + d.deleteBucketACLRule() + d.acl(n) + d.putACLRule(n) + d.deleteACLRule(n) + d.deleteFiles() + + if d.failed { + io.WriteString(w, "\nDemo failed.\n") + } else { + io.WriteString(w, "\nDemo succeeded.\n") + } +} + +// createFile creates a file in Google Cloud Storage. +func (d *demo) createFile(fileName string) { + fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) + + wc := storage.NewWriter(d.ctx, bucket, fileName) + wc.ContentType = "text/plain" + wc.Metadata = map[string]string{ + "x-goog-meta-foo": "foo", + "x-goog-meta-bar": "bar", + } + d.cleanUp = append(d.cleanUp, fileName) + + if _, err := wc.Write([]byte("abcde\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if err := wc.Close(); err != nil { + d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) + return + } +} + +// readFile reads the named file in Google Cloud Storage. +func (d *demo) readFile(fileName string) { + io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") + + rc, err := storage.NewReader(d.ctx, bucket, fileName) + if err != nil { + d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + defer rc.Close() + slurp, err := ioutil.ReadAll(rc) + if err != nil { + d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) + if len(slurp) > 1024 { + fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) + } else { + fmt.Fprintf(d.w, "%s\n", slurp) + } +} + +// copyFile copies a file in Google Cloud Storage. 
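+// The copy is performed server-side by storage.CopyObject; only the
+// resulting object's metadata is returned to the app.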
+func (d *demo) copyFile(fileName string) { + copyName := fileName + "-copy" + fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) + + obj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil) + if err != nil { + d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) + return + } + d.cleanUp = append(d.cleanUp, copyName) + + d.dumpStats(obj) +} + +func (d *demo) dumpStats(obj *storage.Object) { + fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) + fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) + fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) + fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) + fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) + fmt.Fprintf(d.w, "Size: %v, ", obj.Size) + fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) + fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) + fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) + fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) + fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) + if !obj.Deleted.IsZero() { + fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) + } + fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) +} + +// statFile reads the stats of the named file in Google Cloud Storage. +func (d *demo) statFile(fileName string) { + io.WriteString(d.w, "\nFile stat:\n") + + obj, err := storage.StatObject(d.ctx, bucket, fileName) + if err != nil { + d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + d.dumpStats(obj) +} + +// createListFiles creates files that will be used by listBucket. +func (d *demo) createListFiles() { + io.WriteString(d.w, "\nCreating more files for listbucket...\n") + for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { + d.createFile(n) + } +} + +// listBucket lists the contents of a bucket in Google Cloud Storage. +func (d *demo) listBucket() { + io.WriteString(d.w, "\nListbucket result:\n") + + query := &storage.Query{Prefix: "foo"} + for query != nil { + objs, err := storage.ListObjects(d.ctx, bucket, query) + if err != nil { + d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + d.dumpStats(obj) + } + } +} + +func (d *demo) listDir(name, indent string) { + query := &storage.Query{Prefix: name, Delimiter: "/"} + for query != nil { + objs, err := storage.ListObjects(d.ctx, bucket, query) + if err != nil { + d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + fmt.Fprint(d.w, indent) + d.dumpStats(obj) + } + for _, dir := range objs.Prefixes { + fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) + d.listDir(dir, indent+" ") + } + } +} + +// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. +func (d *demo) listBucketDirMode() { + io.WriteString(d.w, "\nListbucket directory mode result:\n") + d.listDir("b", "") +} + +// dumpDefaultACL prints out the default object ACL for this bucket. +func (d *demo) dumpDefaultACL() { + acl, err := storage.DefaultACL(d.ctx, bucket) + if err != nil { + d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) + } +} + +// defaultACL displays the default object ACL for this bucket. 
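+// Default object ACL rules are applied to each newly created object in the
+// bucket unless an ACL is set explicitly for that object.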
+func (d *demo) defaultACL() { + io.WriteString(d.w, "\nDefault object ACL:\n") + d.dumpDefaultACL() +} + +// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. +func (d *demo) putDefaultACLRule() { + io.WriteString(d.w, "\nPut Default object ACL Rule:\n") + err := storage.PutDefaultACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) + if err != nil { + d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket. +func (d *demo) deleteDefaultACLRule() { + io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") + err := storage.DeleteDefaultACLRule(d.ctx, bucket, "allUsers") + if err != nil { + d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// dumpBucketACL prints out the bucket ACL. +func (d *demo) dumpBucketACL() { + acl, err := storage.BucketACL(d.ctx, bucket) + if err != nil { + d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) + } +} + +// bucketACL displays the bucket ACL for this bucket. +func (d *demo) bucketACL() { + io.WriteString(d.w, "\nBucket ACL:\n") + d.dumpBucketACL() +} + +// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. +func (d *demo) putBucketACLRule() { + io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") + err := storage.PutBucketACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) + if err != nil { + d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket. +func (d *demo) deleteBucketACLRule() { + io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") + err := storage.DeleteBucketACLRule(d.ctx, bucket, "allUsers") + if err != nil { + d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// dumpACL prints out the ACL of the named file. +func (d *demo) dumpACL(fileName string) { + acl, err := storage.ACL(d.ctx, bucket, fileName) + if err != nil { + d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) + } +} + +// acl displays the ACL for the named file. +func (d *demo) acl(fileName string) { + fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) + d.dumpACL(fileName) +} + +// putACLRule adds the "allUsers" ACL rule for the named file. +func (d *demo) putACLRule(fileName string) { + fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) + err := storage.PutACLRule(d.ctx, bucket, fileName, "allUsers", storage.RoleReader) + if err != nil { + d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteACLRule deleted the "allUsers" ACL rule for the named file. 
+func (d *demo) deleteACLRule(fileName string) { + fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) + err := storage.DeleteACLRule(d.ctx, bucket, fileName, "allUsers") + if err != nil { + d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteFiles deletes all the temporary files from a bucket created by this demo. +func (d *demo) deleteFiles() { + io.WriteString(d.w, "\nDeleting files...\n") + for _, v := range d.cleanUp { + fmt.Fprintf(d.w, "Deleting file %v\n", v) + if err := storage.DeleteObject(d.ctx, bucket, v); err != nil { + d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) + return + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.yaml b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.yaml new file mode 100644 index 000000000..e98d03087 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.yaml @@ -0,0 +1,8 @@ +application: +version: v1 +runtime: go +api_version: go1 + +handlers: +- url: /.* + script: _go_app diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.go new file mode 100644 index 000000000..47c7e927b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.go @@ -0,0 +1,396 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package main is an example Managed VM app using the Google Cloud Storage API. +package main + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/appengine" + "google.golang.org/appengine/file" + "google.golang.org/appengine/log" + "google.golang.org/cloud" + "google.golang.org/cloud/storage" +) + +// bucket is a local cache of the app's default bucket name. +var bucket string // or: var bucket = ".appspot.com" + +func main() { + http.HandleFunc("/", handler) + appengine.Main() +} + +// demo struct holds information needed to run the various demo functions. +type demo struct { + c context.Context + w http.ResponseWriter + ctx context.Context + // cleanUp is a list of filenames that need cleaning up at the end of the demo. + cleanUp []string + // failed indicates that one or more of the demo steps failed. + failed bool +} + +func (d *demo) errorf(format string, args ...interface{}) { + d.failed = true + log.Errorf(d.c, format, args...) +} + +// handler is the main demo entry point that calls the GCS operations.
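+// It resolves the app's default bucket name, builds an http.Client whose
+// oauth2.Transport draws tokens from the App Engine service account, wraps
+// both in a cloud.Context, and then runs each demo step in order.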
+func handler(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + c := appengine.NewContext(r) + if bucket == "" { + var err error + if bucket, err = file.DefaultBucketName(c); err != nil { + log.Errorf(c, "failed to get default GCS bucket name: %v", err) + return + } + } + hc := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(c, storage.ScopeFullControl), + }, + } + ctx := cloud.NewContext(appengine.AppID(c), hc) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(c)) + fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) + + d := &demo{ + c: c, + w: w, + ctx: ctx, + } + + n := "demo-testfile-go" + d.createFile(n) + d.readFile(n) + d.copyFile(n) + d.statFile(n) + d.createListFiles() + d.listBucket() + d.listBucketDirMode() + d.defaultACL() + d.putDefaultACLRule() + d.deleteDefaultACLRule() + d.bucketACL() + d.putBucketACLRule() + d.deleteBucketACLRule() + d.acl(n) + d.putACLRule(n) + d.deleteACLRule(n) + d.deleteFiles() + + if d.failed { + io.WriteString(w, "\nDemo failed.\n") + } else { + io.WriteString(w, "\nDemo succeeded.\n") + } +} + +// createFile creates a file in Google Cloud Storage. +func (d *demo) createFile(fileName string) { + fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) + + wc := storage.NewWriter(d.ctx, bucket, fileName) + wc.ContentType = "text/plain" + wc.Metadata = map[string]string{ + "x-goog-meta-foo": "foo", + "x-goog-meta-bar": "bar", + } + d.cleanUp = append(d.cleanUp, fileName) + + if _, err := wc.Write([]byte("abcde\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if err := wc.Close(); err != nil { + d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) + return + } +} + +// readFile reads the named file in Google Cloud Storage. +func (d *demo) readFile(fileName string) { + io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") + + rc, err := storage.NewReader(d.ctx, bucket, fileName) + if err != nil { + d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + defer rc.Close() + slurp, err := ioutil.ReadAll(rc) + if err != nil { + d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) + if len(slurp) > 1024 { + fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) + } else { + fmt.Fprintf(d.w, "%s\n", slurp) + } +} + +// copyFile copies a file in Google Cloud Storage. 
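+// Shape of the call it makes (editorial sketch of the call below; the final
+// nil is the optional attributes argument for the destination object, which
+// the demo leaves unset):
+//
+//   obj, err := storage.CopyObject(ctx, srcBucket, srcName, dstBucket, dstName, nil)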
+func (d *demo) copyFile(fileName string) { + copyName := fileName + "-copy" + fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) + + obj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil) + if err != nil { + d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) + return + } + d.cleanUp = append(d.cleanUp, copyName) + + d.dumpStats(obj) +} + +func (d *demo) dumpStats(obj *storage.Object) { + fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) + fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) + fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) + fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) + fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) + fmt.Fprintf(d.w, "Size: %v, ", obj.Size) + fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) + fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) + fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) + fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) + fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) + if !obj.Deleted.IsZero() { + fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) + } + fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) +} + +// statFile reads the stats of the named file in Google Cloud Storage. +func (d *demo) statFile(fileName string) { + io.WriteString(d.w, "\nFile stat:\n") + + obj, err := storage.StatObject(d.ctx, bucket, fileName) + if err != nil { + d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + d.dumpStats(obj) +} + +// createListFiles creates files that will be used by listBucket. +func (d *demo) createListFiles() { + io.WriteString(d.w, "\nCreating more files for listbucket...\n") + for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { + d.createFile(n) + } +} + +// listBucket lists the contents of a bucket in Google Cloud Storage. +func (d *demo) listBucket() { + io.WriteString(d.w, "\nListbucket result:\n") + + query := &storage.Query{Prefix: "foo"} + for query != nil { + objs, err := storage.ListObjects(d.ctx, bucket, query) + if err != nil { + d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + d.dumpStats(obj) + } + } +} + +func (d *demo) listDir(name, indent string) { + query := &storage.Query{Prefix: name, Delimiter: "/"} + for query != nil { + objs, err := storage.ListObjects(d.ctx, bucket, query) + if err != nil { + d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + fmt.Fprint(d.w, indent) + d.dumpStats(obj) + } + for _, dir := range objs.Prefixes { + fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) + d.listDir(dir, indent+" ") + } + } +} + +// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. +func (d *demo) listBucketDirMode() { + io.WriteString(d.w, "\nListbucket directory mode result:\n") + d.listDir("b", "") +} + +// dumpDefaultACL prints out the default object ACL for this bucket. +func (d *demo) dumpDefaultACL() { + acl, err := storage.DefaultACL(d.ctx, bucket) + if err != nil { + d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) + } +} + +// defaultACL displays the default object ACL for this bucket. 
+func (d *demo) defaultACL() { + io.WriteString(d.w, "\nDefault object ACL:\n") + d.dumpDefaultACL() +} + +// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. +func (d *demo) putDefaultACLRule() { + io.WriteString(d.w, "\nPut Default object ACL Rule:\n") + err := storage.PutDefaultACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) + if err != nil { + d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// deleteDefaultACLRule deletes the "allUsers" default object ACL rule for this bucket. +func (d *demo) deleteDefaultACLRule() { + io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") + err := storage.DeleteDefaultACLRule(d.ctx, bucket, "allUsers") + if err != nil { + d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// dumpBucketACL prints out the bucket ACL. +func (d *demo) dumpBucketACL() { + acl, err := storage.BucketACL(d.ctx, bucket) + if err != nil { + d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) + } +} + +// bucketACL displays the bucket ACL for this bucket. +func (d *demo) bucketACL() { + io.WriteString(d.w, "\nBucket ACL:\n") + d.dumpBucketACL() +} + +// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. +func (d *demo) putBucketACLRule() { + io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") + err := storage.PutBucketACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) + if err != nil { + d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// deleteBucketACLRule deletes the "allUsers" bucket ACL rule for this bucket. +func (d *demo) deleteBucketACLRule() { + io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") + err := storage.DeleteBucketACLRule(d.ctx, bucket, "allUsers") + if err != nil { + d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// dumpACL prints out the ACL of the named file. +func (d *demo) dumpACL(fileName string) { + acl, err := storage.ACL(d.ctx, bucket, fileName) + if err != nil { + d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) + } +} + +// acl displays the ACL for the named file. +func (d *demo) acl(fileName string) { + fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) + d.dumpACL(fileName) +} + +// putACLRule adds the "allUsers" ACL rule for the named file. +func (d *demo) putACLRule(fileName string) { + fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) + err := storage.PutACLRule(d.ctx, bucket, fileName, "allUsers", storage.RoleReader) + if err != nil { + d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteACLRule deletes the "allUsers" ACL rule for the named file.
+func (d *demo) deleteACLRule(fileName string) { + fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) + err := storage.DeleteACLRule(d.ctx, bucket, fileName, "allUsers") + if err != nil { + d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteFiles deletes all the temporary files from a bucket created by this demo. +func (d *demo) deleteFiles() { + io.WriteString(d.w, "\nDeleting files...\n") + for _, v := range d.cleanUp { + fmt.Fprintf(d.w, "Deleting file %v\n", v) + if err := storage.DeleteObject(d.ctx, bucket, v); err != nil { + d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) + return + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.yaml b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.yaml new file mode 100644 index 000000000..6847fc871 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.yaml @@ -0,0 +1,10 @@ +runtime: go +api_version: go1 +vm: true + +manual_scaling: + instances: 1 + +handlers: +- url: /.* + script: _go_app diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go new file mode 100644 index 000000000..8b0db1b5d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go @@ -0,0 +1,128 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" + "sync" + + "golang.org/x/net/context" +) + +type contextKey struct{} + +func WithContext(parent context.Context, projID string, c *http.Client) context.Context { + if c == nil { + panic("nil *http.Client passed to WithContext") + } + if projID == "" { + panic("empty project ID passed to WithContext") + } + return context.WithValue(parent, contextKey{}, &cloudContext{ + ProjectID: projID, + HTTPClient: c, + }) +} + +const userAgent = "gcloud-golang/0.1" + +type cloudContext struct { + ProjectID string + HTTPClient *http.Client + + mu sync.Mutex // guards svc + svc map[string]interface{} // e.g. "storage" => *rawStorage.Service +} + +// Service returns the result of the fill function if it's never been +// called before for the given name (which is assumed to be an API +// service name, like "datastore"). If it has already been cached, the fill +// func is not run. +// It's safe for concurrent use by multiple goroutines. 
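+// For example (editorial sketch; rawStorage stands for a generated API
+// package, as in the svc map comment above):
+//
+//   svc := Service(ctx, "storage", func(hc *http.Client) interface{} {
+//       s, _ := rawStorage.New(hc)
+//       return s
+//   }).(*rawStorage.Service)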
+func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { + return cc(ctx).service(name, fill) +} + +func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { + c.mu.Lock() + defer c.mu.Unlock() + + if c.svc == nil { + c.svc = make(map[string]interface{}) + } else if v, ok := c.svc[name]; ok { + return v + } + v := fill(c.HTTPClient) + c.svc[name] = v + return v +} + +// Transport is an http.RoundTripper that appends +// Google Cloud client's user-agent to the original +// request's user-agent header. +type Transport struct { + // Base represents the actual http.RoundTripper + // the requests will be delegated to. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +func ProjID(ctx context.Context) string { + return cc(ctx).ProjectID +} + +func HTTPClient(ctx context.Context) *http.Client { + return cc(ctx).HTTPClient +} + +// cc returns the internal *cloudContext (cc) state for a context.Context. +// It panics if the user did it wrong. +func cc(ctx context.Context) *cloudContext { + if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { + return c + } + panic("invalid context.Context type; it should be created with cloud.NewContext") +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go new file mode 100644 index 000000000..9cb9be528 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go @@ -0,0 +1,1633 @@ +// Code generated by protoc-gen-go. +// source: datastore_v1.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. + +It is generated from these files: + datastore_v1.proto + +It has these top-level messages: + PartitionId + Key + Value + Property + Entity + EntityResult + Query + KindExpression + PropertyReference + PropertyExpression + PropertyOrder + Filter + CompositeFilter + PropertyFilter + GqlQuery + GqlQueryArg + QueryResultBatch + Mutation + MutationResult + ReadOptions + LookupRequest + LookupResponse + RunQueryRequest + RunQueryResponse + BeginTransactionRequest + BeginTransactionResponse + RollbackRequest + RollbackResponse + CommitRequest + CommitResponse + AllocateIdsRequest + AllocateIdsResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// Specifies what data the 'entity' field contains. 
+// A ResultType is either implied (for example, in LookupResponse.found it +// is always FULL) or specified by context (for example, in message +// QueryResultBatch, field 'entity_result_type' specifies a ResultType +// for all the values in field 'entity_result'). +type EntityResult_ResultType int32 + +const ( + EntityResult_FULL EntityResult_ResultType = 1 + EntityResult_PROJECTION EntityResult_ResultType = 2 + // The entity may have no key. + // A property value may have meaning 18. + EntityResult_KEY_ONLY EntityResult_ResultType = 3 +) + +var EntityResult_ResultType_name = map[int32]string{ + 1: "FULL", + 2: "PROJECTION", + 3: "KEY_ONLY", +} +var EntityResult_ResultType_value = map[string]int32{ + "FULL": 1, + "PROJECTION": 2, + "KEY_ONLY": 3, +} + +func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { + p := new(EntityResult_ResultType) + *p = x + return p +} +func (x EntityResult_ResultType) String() string { + return proto.EnumName(EntityResult_ResultType_name, int32(x)) +} +func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") + if err != nil { + return err + } + *x = EntityResult_ResultType(value) + return nil +} + +type PropertyExpression_AggregationFunction int32 + +const ( + PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 +) + +var PropertyExpression_AggregationFunction_name = map[int32]string{ + 1: "FIRST", +} +var PropertyExpression_AggregationFunction_value = map[string]int32{ + "FIRST": 1, +} + +func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { + p := new(PropertyExpression_AggregationFunction) + *p = x + return p +} +func (x PropertyExpression_AggregationFunction) String() string { + return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) +} +func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") + if err != nil { + return err + } + *x = PropertyExpression_AggregationFunction(value) + return nil +} + +type PropertyOrder_Direction int32 + +const ( + PropertyOrder_ASCENDING PropertyOrder_Direction = 1 + PropertyOrder_DESCENDING PropertyOrder_Direction = 2 +) + +var PropertyOrder_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var PropertyOrder_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { + p := new(PropertyOrder_Direction) + *p = x + return p +} +func (x PropertyOrder_Direction) String() string { + return proto.EnumName(PropertyOrder_Direction_name, int32(x)) +} +func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") + if err != nil { + return err + } + *x = PropertyOrder_Direction(value) + return nil +} + +type CompositeFilter_Operator int32 + +const ( + CompositeFilter_AND CompositeFilter_Operator = 1 +) + +var CompositeFilter_Operator_name = map[int32]string{ + 1: "AND", +} +var CompositeFilter_Operator_value = map[string]int32{ + "AND": 1, +} + +func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { + p := new(CompositeFilter_Operator) + *p = x + return p +} +func (x CompositeFilter_Operator) String() string { + return 
proto.EnumName(CompositeFilter_Operator_name, int32(x)) +} +func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") + if err != nil { + return err + } + *x = CompositeFilter_Operator(value) + return nil +} + +type PropertyFilter_Operator int32 + +const ( + PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 + PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 + PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 + PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 + PropertyFilter_EQUAL PropertyFilter_Operator = 5 + PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 +) + +var PropertyFilter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 11: "HAS_ANCESTOR", +} +var PropertyFilter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "HAS_ANCESTOR": 11, +} + +func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { + p := new(PropertyFilter_Operator) + *p = x + return p +} +func (x PropertyFilter_Operator) String() string { + return proto.EnumName(PropertyFilter_Operator_name, int32(x)) +} +func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") + if err != nil { + return err + } + *x = PropertyFilter_Operator(value) + return nil +} + +// The possible values for the 'more_results' field. +type QueryResultBatch_MoreResultsType int32 + +const ( + QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 + QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 + // results after the limit. 
+ QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 +) + +var QueryResultBatch_MoreResultsType_name = map[int32]string{ + 1: "NOT_FINISHED", + 2: "MORE_RESULTS_AFTER_LIMIT", + 3: "NO_MORE_RESULTS", +} +var QueryResultBatch_MoreResultsType_value = map[string]int32{ + "NOT_FINISHED": 1, + "MORE_RESULTS_AFTER_LIMIT": 2, + "NO_MORE_RESULTS": 3, +} + +func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { + p := new(QueryResultBatch_MoreResultsType) + *p = x + return p +} +func (x QueryResultBatch_MoreResultsType) String() string { + return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) +} +func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") + if err != nil { + return err + } + *x = QueryResultBatch_MoreResultsType(value) + return nil +} + +type ReadOptions_ReadConsistency int32 + +const ( + ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 + ReadOptions_STRONG ReadOptions_ReadConsistency = 1 + ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 +) + +var ReadOptions_ReadConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "STRONG", + 2: "EVENTUAL", +} +var ReadOptions_ReadConsistency_value = map[string]int32{ + "DEFAULT": 0, + "STRONG": 1, + "EVENTUAL": 2, +} + +func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { + p := new(ReadOptions_ReadConsistency) + *p = x + return p +} +func (x ReadOptions_ReadConsistency) String() string { + return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) +} +func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") + if err != nil { + return err + } + *x = ReadOptions_ReadConsistency(value) + return nil +} + +type BeginTransactionRequest_IsolationLevel int32 + +const ( + BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 + // conflict if their mutations conflict. For example: + // Read(A),Write(B) may not conflict with Read(B),Write(A), + // but Read(B),Write(B) does conflict with Read(B),Write(B). 
+ BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 +) + +var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ + 0: "SNAPSHOT", + 1: "SERIALIZABLE", +} +var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ + "SNAPSHOT": 0, + "SERIALIZABLE": 1, +} + +func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { + p := new(BeginTransactionRequest_IsolationLevel) + *p = x + return p +} +func (x BeginTransactionRequest_IsolationLevel) String() string { + return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) +} +func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") + if err != nil { + return err + } + *x = BeginTransactionRequest_IsolationLevel(value) + return nil +} + +type CommitRequest_Mode int32 + +const ( + CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 + CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 +) + +var CommitRequest_Mode_name = map[int32]string{ + 1: "TRANSACTIONAL", + 2: "NON_TRANSACTIONAL", +} +var CommitRequest_Mode_value = map[string]int32{ + "TRANSACTIONAL": 1, + "NON_TRANSACTIONAL": 2, +} + +func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { + p := new(CommitRequest_Mode) + *p = x + return p +} +func (x CommitRequest_Mode) String() string { + return proto.EnumName(CommitRequest_Mode_name, int32(x)) +} +func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") + if err != nil { + return err + } + *x = CommitRequest_Mode(value) + return nil +} + +// An identifier for a particular subset of entities. +// +// Entities are partitioned into various subsets, each used by different +// datasets and different namespaces within a dataset and so forth. +// +// All input partition IDs are normalized before use. +// A partition ID is normalized as follows: +// If the partition ID is unset or is set to an empty partition ID, replace it +// with the context partition ID. +// Otherwise, if the partition ID has no dataset ID, assign it the context +// partition ID's dataset ID. +// Unless otherwise documented, the context partition ID has the dataset ID set +// to the context dataset ID and no other partition dimension set. +// +// A partition ID is empty if all of its fields are unset. +// +// Partition dimension: +// A dimension may be unset. +// A dimension's value must never be "". +// A dimension's value must match [A-Za-z\d\.\-_]{1,100} +// If the value of any dimension matches regex "__.*__", +// the partition is reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented contexts. +// +// Dataset ID: +// A dataset id's value must never be "". +// A dataset id's value must match +// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} +type PartitionId struct { + // The dataset ID. + DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` + // The namespace. 
+ Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} + +func (m *PartitionId) GetDatasetId() string { + if m != nil && m.DatasetId != nil { + return *m.DatasetId + } + return "" +} + +func (m *PartitionId) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +// A unique identifier for an entity. +// If a key's partition id or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +type Key struct { + // Entities are partitioned into subsets, currently identified by a dataset + // (usually implicitly specified by the project) and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a root entity, the second element identifies + // a child of the root entity, the third element a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's ancestors. + // An entity path is always fully complete: ALL of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. A path can never + // be empty. + PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} + +func (m *Key) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *Key) GetPathElement() []*Key_PathElement { + if m != nil { + return m.PathElement + } + return nil +} + +// A (kind, ID/name) pair used to construct a key path. +// +// At most one of name or ID may be set. +// If either is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. + // A kind matching regex "__.*__" is reserved/read-only. + // A kind must not contain more than 500 characters. + // Cannot be "". + Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` + // The ID of the entity. + // Never equal to zero. Values less than zero are discouraged and will not + // be supported in the future. + Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` + // The name of the entity. + // A name matching regex "__.*__" is reserved/read-only. + // A name must not be more than 500 characters. + // Cannot be "". 
+ Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} + +func (m *Key_PathElement) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Key_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Key_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A message that can hold any of the supported value types and associated +// metadata. +// +// At most one of the Value fields may be set. +// If none are set the value is "null". +// +type Value struct { + // A boolean value. + BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` + // An integer value. + IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` + // A double value. + DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` + // A timestamp value. + TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` + // A key value. + KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` + // A blob key value. + BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` + // A UTF-8 encoded string value. + StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` + // A blob value. + BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` + // An entity value. + // May have no key. + // May have a key with an incomplete key path. + // May have a reserved/read-only key. + EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` + // A list value. + // Cannot contain another list value. + // Cannot also have a meaning and indexing set. + ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` + // The meaning field is reserved and should not be used. + Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` + // If the value should be indexed. + // + // The indexed property may be set for a + // null value. + // When indexed is true, stringValue + // is limited to 500 characters and the blob value is limited to 500 bytes. + // Exception: If meaning is set to 2, string_value is limited to 2038 + // characters regardless of indexed. + // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 + // will be ignored on input (and will never be set on output). + // Input values by default have indexed set to + // true; however, you can explicitly set indexed to + // true if you want. (An output value never has + // indexed explicitly set to true.) If a value is + // itself an entity, it cannot have indexed set to + // true. + // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
+ Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} + +const Default_Value_Indexed bool = true + +func (m *Value) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *Value) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Value) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *Value) GetTimestampMicrosecondsValue() int64 { + if m != nil && m.TimestampMicrosecondsValue != nil { + return *m.TimestampMicrosecondsValue + } + return 0 +} + +func (m *Value) GetKeyValue() *Key { + if m != nil { + return m.KeyValue + } + return nil +} + +func (m *Value) GetBlobKeyValue() string { + if m != nil && m.BlobKeyValue != nil { + return *m.BlobKeyValue + } + return "" +} + +func (m *Value) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Value) GetBlobValue() []byte { + if m != nil { + return m.BlobValue + } + return nil +} + +func (m *Value) GetEntityValue() *Entity { + if m != nil { + return m.EntityValue + } + return nil +} + +func (m *Value) GetListValue() []*Value { + if m != nil { + return m.ListValue + } + return nil +} + +func (m *Value) GetMeaning() int32 { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return 0 +} + +func (m *Value) GetIndexed() bool { + if m != nil && m.Indexed != nil { + return *m.Indexed + } + return Default_Value_Indexed +} + +// An entity property. +type Property struct { + // The name of the property. + // A property name matching regex "__.*__" is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // Cannot be "". + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + // The value(s) of the property. + // Each value can have only one value property populated. For example, + // you cannot have a values list of { value: { integerValue: 22, + // stringValue: "a" } }, but you can have { value: { listValue: + // [ { integerValue: 22 }, { stringValue: "a" } ] }. + Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// An entity. +// +// An entity is limited to 1 megabyte when stored. That roughly +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +type Entity struct { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in Value.entityValue may have no key). + // An entity's kind is its key's path's last element's kind, + // or null if it has no key. + Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // The entity's properties. + // Each property's name must be unique for its entity. 
+ Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} + +func (m *Entity) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +// The result of fetching an entity from the datastore. +type EntityResult struct { + // The resulting entity. + Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityResult) Reset() { *m = EntityResult{} } +func (m *EntityResult) String() string { return proto.CompactTextString(m) } +func (*EntityResult) ProtoMessage() {} + +func (m *EntityResult) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +// A query. +type Query struct { + // The projection to return. If not set the entire entity is returned. + Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` + // The kinds to query (if empty, returns entities from all kinds). + Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` + // The filter to apply (optional). + Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` + // The order to apply to the query results (if empty, order is unspecified). + Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` + // The properties to group by (if empty, no grouping is applied to the + // result set). + GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` + // A starting point for the query results. Optional. Query cursors are + // returned in query result batches. + StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` + // An ending point for the query results. Optional. Query cursors are + // returned in query result batches. + EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` + // The number of results to skip. Applies before limit, but after all other + // constraints (optional, defaults to 0). + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + // The maximum number of results to return. Applies after all other + // constraints. Optional. 
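+	// A single batch may hold fewer than limit results; callers consult
+	// QueryResultBatch.MoreResults to decide whether to keep fetching
+	// (editorial note).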
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 + +func (m *Query) GetProjection() []*PropertyExpression { + if m != nil { + return m.Projection + } + return nil +} + +func (m *Query) GetKind() []*KindExpression { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Query) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetOrder() []*PropertyOrder { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetGroupBy() []*PropertyReference { + if m != nil { + return m.GroupBy + } + return nil +} + +func (m *Query) GetStartCursor() []byte { + if m != nil { + return m.StartCursor + } + return nil +} + +func (m *Query) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +// A representation of a kind. +type KindExpression struct { + // The name of the kind. + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KindExpression) Reset() { *m = KindExpression{} } +func (m *KindExpression) String() string { return proto.CompactTextString(m) } +func (*KindExpression) ProtoMessage() {} + +func (m *KindExpression) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A reference to a property relative to the kind expressions. +// exactly. +type PropertyReference struct { + // The name of the property. + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyReference) Reset() { *m = PropertyReference{} } +func (m *PropertyReference) String() string { return proto.CompactTextString(m) } +func (*PropertyReference) ProtoMessage() {} + +func (m *PropertyReference) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// A representation of a property in a projection. +type PropertyExpression struct { + // The property to project. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The aggregation function to apply to the property. Optional. + // Can only be used when grouping by at least one property. Must + // then be set on all properties in the projection that are not + // being grouped by. 
+ AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=datastore.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } +func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } +func (*PropertyExpression) ProtoMessage() {} + +func (m *PropertyExpression) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { + if m != nil && m.AggregationFunction != nil { + return *m.AggregationFunction + } + return PropertyExpression_FIRST +} + +// The desired order for a specific property. +type PropertyOrder struct { + // The property to order by. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The direction to order by. + Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=datastore.PropertyOrder_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } +func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } +func (*PropertyOrder) ProtoMessage() {} + +const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING + +func (m *PropertyOrder) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_PropertyOrder_Direction +} + +// A holder for any type of filter. Exactly one field should be specified. +type Filter struct { + // A composite filter. + CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` + // A filter on a property. + PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} + +func (m *Filter) GetCompositeFilter() *CompositeFilter { + if m != nil { + return m.CompositeFilter + } + return nil +} + +func (m *Filter) GetPropertyFilter() *PropertyFilter { + if m != nil { + return m.PropertyFilter + } + return nil +} + +// A filter that merges the multiple other filters using the given operation. +type CompositeFilter struct { + // The operator for combining multiple filters. + Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=datastore.CompositeFilter_Operator" json:"operator,omitempty"` + // The list of filters to combine. + // Must contain at least one filter. 
+ Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } +func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } +func (*CompositeFilter) ProtoMessage() {} + +func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return CompositeFilter_AND +} + +func (m *CompositeFilter) GetFilter() []*Filter { + if m != nil { + return m.Filter + } + return nil +} + +// A filter on a specific property. +type PropertyFilter struct { + // The property to filter by. + Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` + // The operator to filter by. + Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=datastore.PropertyFilter_Operator" json:"operator,omitempty"` + // The value to compare the property to. + Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } +func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } +func (*PropertyFilter) ProtoMessage() {} + +func (m *PropertyFilter) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return PropertyFilter_LESS_THAN +} + +func (m *PropertyFilter) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// A GQL query. +type GqlQuery struct { + QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` + // When false, the query string must not contain a literal. + AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` + // A named argument must set field GqlQueryArg.name. + // No two named arguments may have the same name. + // For each non-reserved named binding site in the query string, + // there must be a named argument with that name, + // but not necessarily the inverse. + NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` + // Numbered binding site @1 references the first numbered argument, + // effectively using 1-based indexing, rather than the usual 0. + // A numbered argument must NOT set field GqlQueryArg.name. + // For each binding site numbered i in query_string, + // there must be an ith numbered argument. + // The inverse must also be true. 
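+	// For example (editorial sketch): the query string
+	//   "SELECT * FROM Foo WHERE bar = @1"
+	// requires exactly one numbered argument, carrying the value bound to @1.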
+ NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GqlQuery) Reset() { *m = GqlQuery{} } +func (m *GqlQuery) String() string { return proto.CompactTextString(m) } +func (*GqlQuery) ProtoMessage() {} + +const Default_GqlQuery_AllowLiteral bool = false + +func (m *GqlQuery) GetQueryString() string { + if m != nil && m.QueryString != nil { + return *m.QueryString + } + return "" +} + +func (m *GqlQuery) GetAllowLiteral() bool { + if m != nil && m.AllowLiteral != nil { + return *m.AllowLiteral + } + return Default_GqlQuery_AllowLiteral +} + +func (m *GqlQuery) GetNameArg() []*GqlQueryArg { + if m != nil { + return m.NameArg + } + return nil +} + +func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { + if m != nil { + return m.NumberArg + } + return nil +} + +// A binding argument for a GQL query. +// Exactly one of fields value and cursor must be set. +type GqlQueryArg struct { + // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". + // Must not match regex "__.*__". + // Must not be "". + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } +func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } +func (*GqlQueryArg) ProtoMessage() {} + +func (m *GqlQueryArg) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *GqlQueryArg) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *GqlQueryArg) GetCursor() []byte { + if m != nil { + return m.Cursor + } + return nil +} + +// A batch of results produced by a query. +type QueryResultBatch struct { + // The result type for every entity in entityResults. + EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=datastore.EntityResult_ResultType" json:"entity_result_type,omitempty"` + // The results for this batch. + EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` + // A cursor that points to the position after the last result in the batch. + // May be absent. + EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` + // The state of the query after the current batch. + MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=datastore.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` + // The number of results skipped because of Query.offset. 
+ SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } +func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } +func (*QueryResultBatch) ProtoMessage() {} + +func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { + if m != nil && m.EntityResultType != nil { + return *m.EntityResultType + } + return EntityResult_FULL +} + +func (m *QueryResultBatch) GetEntityResult() []*EntityResult { + if m != nil { + return m.EntityResult + } + return nil +} + +func (m *QueryResultBatch) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return QueryResultBatch_NOT_FINISHED +} + +func (m *QueryResultBatch) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +// A set of changes to apply. +// +// No entity in this message may have a reserved property name, +// not even a property in an entity in a value. +// No value in this message may have meaning 18, +// not even a value in an entity in another value. +// +// If entities with duplicate keys are present, an arbitrary choice will +// be made as to which is written. +type Mutation struct { + // Entities to upsert. + // Each upserted entity's key must have a complete path and + // must not be reserved/read-only. + Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` + // Entities to update. + // Each updated entity's key must have a complete path and + // must not be reserved/read-only. + Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` + // Entities to insert. + // Each inserted entity's key must have a complete path and + // must not be reserved/read-only. + Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` + // Insert entities with a newly allocated ID. + // Each inserted entity's key must omit the final identifier in its path and + // must not be reserved/read-only. + InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` + // Keys of entities to delete. + // Each key must have a complete key path and must not be reserved/read-only. + Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` + // Ignore a user specified read-only period. Optional. + Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} + +func (m *Mutation) GetUpsert() []*Entity { + if m != nil { + return m.Upsert + } + return nil +} + +func (m *Mutation) GetUpdate() []*Entity { + if m != nil { + return m.Update + } + return nil +} + +func (m *Mutation) GetInsert() []*Entity { + if m != nil { + return m.Insert + } + return nil +} + +func (m *Mutation) GetInsertAutoId() []*Entity { + if m != nil { + return m.InsertAutoId + } + return nil +} + +func (m *Mutation) GetDelete() []*Key { + if m != nil { + return m.Delete + } + return nil +} + +func (m *Mutation) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return false +} + +// The result of applying a mutation. 
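+// Its IndexUpdates field counts index writes, and InsertAutoIdKey reports the
+// keys allocated for insertAutoId entities, in request order (see the field
+// comments below).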
+type MutationResult struct { + // Number of index writes. + IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` + // Keys for insertAutoId entities. One per entity from the + // request, in the same order. + InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutationResult) Reset() { *m = MutationResult{} } +func (m *MutationResult) String() string { return proto.CompactTextString(m) } +func (*MutationResult) ProtoMessage() {} + +func (m *MutationResult) GetIndexUpdates() int32 { + if m != nil && m.IndexUpdates != nil { + return *m.IndexUpdates + } + return 0 +} + +func (m *MutationResult) GetInsertAutoIdKey() []*Key { + if m != nil { + return m.InsertAutoIdKey + } + return nil +} + +// Options shared by read requests. +type ReadOptions struct { + // The read consistency to use. + // Cannot be set when transaction is set. + // Lookup and ancestor queries default to STRONG, global queries default to + // EVENTUAL and cannot be set to STRONG. + ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=datastore.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` + // The transaction to use. Optional. + Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReadOptions) Reset() { *m = ReadOptions{} } +func (m *ReadOptions) String() string { return proto.CompactTextString(m) } +func (*ReadOptions) ProtoMessage() {} + +const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT + +func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { + if m != nil && m.ReadConsistency != nil { + return *m.ReadConsistency + } + return Default_ReadOptions_ReadConsistency +} + +func (m *ReadOptions) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for Lookup. +type LookupRequest struct { + // Options for this lookup request. Optional. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` + // Keys of entities to look up from the datastore. + Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LookupRequest) Reset() { *m = LookupRequest{} } +func (m *LookupRequest) String() string { return proto.CompactTextString(m) } +func (*LookupRequest) ProtoMessage() {} + +func (m *LookupRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *LookupRequest) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +// The response for Lookup. +type LookupResponse struct { + // Entities found as ResultType.FULL entities. + Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + // Entities not found as ResultType.KEY_ONLY entities. + Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` + // A list of keys that were not looked up due to resource constraints. 
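A minimal sketch (editor-added, not from the diff) of building a LookupRequest with eventually consistent reads, assuming the standard Enum() helper that goprotobuf generates for proto2 enum types:

package sketch

import (
	pb "google.golang.org/cloud/internal/datastore"
)

// eventualLookup builds a LookupRequest that allows eventually consistent reads.
func eventualLookup(keys []*pb.Key) *pb.LookupRequest {
	return &pb.LookupRequest{
		ReadOptions: &pb.ReadOptions{
			// proto2 optional fields are pointers; Enum() returns *ReadConsistency.
			ReadConsistency: pb.ReadOptions_EVENTUAL.Enum(),
		},
		Key: keys,
	}
}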
+ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LookupResponse) Reset() { *m = LookupResponse{} } +func (m *LookupResponse) String() string { return proto.CompactTextString(m) } +func (*LookupResponse) ProtoMessage() {} + +func (m *LookupResponse) GetFound() []*EntityResult { + if m != nil { + return m.Found + } + return nil +} + +func (m *LookupResponse) GetMissing() []*EntityResult { + if m != nil { + return m.Missing + } + return nil +} + +func (m *LookupResponse) GetDeferred() []*Key { + if m != nil { + return m.Deferred + } + return nil +} + +// The request for RunQuery. +type RunQueryRequest struct { + // The options for this query. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` + // Entities are partitioned into subsets, identified by a dataset (usually + // implicitly specified by the project) and namespace ID. Queries are scoped + // to a single partition. + // This partition ID is normalized with the standard default context + // partition ID, but all other partition IDs in RunQueryRequest are + // normalized with this partition ID as the context partition ID. + PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` + // The query to run. + // Either this field or field gql_query must be set, but not both. + Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` + // The GQL query to run. + // Either this field or field query must be set, but not both. + GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } +func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } +func (*RunQueryRequest) ProtoMessage() {} + +func (m *RunQueryRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *RunQueryRequest) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *RunQueryRequest) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { + if m != nil { + return m.GqlQuery + } + return nil +} + +// The response for RunQuery. +type RunQueryResponse struct { + // A batch of query results (always present). + Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } +func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } +func (*RunQueryResponse) ProtoMessage() {} + +func (m *RunQueryResponse) GetBatch() *QueryResultBatch { + if m != nil { + return m.Batch + } + return nil +} + +// The request for BeginTransaction. +type BeginTransactionRequest struct { + // The transaction isolation level. 
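A sketch (editor-added, not part of the vendored files) of the GQL variant of RunQueryRequest described above: exactly one of query and gql_query may be set. The query text is a made-up example; proto.String/proto.Bool are goprotobuf's pointer helpers, and the Value.BooleanValue field is assumed from the generated code for the proto's boolean_value field.

package sketch

import (
	"github.com/golang/protobuf/proto"
	pb "google.golang.org/cloud/internal/datastore"
)

// gqlRequest builds the GQL variant of RunQueryRequest; Query is left unset
// because exactly one of query and gql_query may be populated.
func gqlRequest(partition *pb.PartitionId) *pb.RunQueryRequest {
	return &pb.RunQueryRequest{
		PartitionId: partition,
		GqlQuery: &pb.GqlQuery{
			QueryString:  proto.String("SELECT * FROM Task WHERE done = @done"),
			AllowLiteral: proto.Bool(false),
			NameArg: []*pb.GqlQueryArg{{
				Name:  proto.String("done"),
				Value: &pb.Value{BooleanValue: proto.Bool(false)},
			}},
		},
	}
}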
+ IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=datastore.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT + +func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { + if m != nil && m.IsolationLevel != nil { + return *m.IsolationLevel + } + return Default_BeginTransactionRequest_IsolationLevel +} + +// The response for BeginTransaction. +type BeginTransactionResponse struct { + // The transaction identifier (always present). + Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } +func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionResponse) ProtoMessage() {} + +func (m *BeginTransactionResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for Rollback. +type RollbackRequest struct { + // The transaction identifier, returned by a call to + // beginTransaction. + Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} + +func (m *RollbackRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The response for Rollback. +type RollbackResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackResponse) ProtoMessage() {} + +// The request for Commit. +type CommitRequest struct { + // The transaction identifier, returned by a call to + // beginTransaction. Must be set when mode is TRANSACTIONAL. + Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` + // The mutation to perform. Optional. + Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` + // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
+ Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=datastore.CommitRequest_Mode,def=1" json:"mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} + +const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL + +func (m *CommitRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *CommitRequest) GetMutation() *Mutation { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *CommitRequest) GetMode() CommitRequest_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_CommitRequest_Mode +} + +// The response for Commit. +type CommitResponse struct { + // The result of performing the mutation (if any). + MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetMutationResult() *MutationResult { + if m != nil { + return m.MutationResult + } + return nil +} + +// The request for AllocateIds. +type AllocateIdsRequest struct { + // A list of keys with incomplete key paths to allocate IDs for. + // No key may be reserved/read-only. + Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +// The response for AllocateIds. +type AllocateIdsResponse struct { + // The keys specified in the request (in the same order), each with + // its key path completed with a newly allocated ID. 
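Editor's sketch (not from the diff): Default_CommitRequest_Mode above is TRANSACTIONAL, so a standalone mutation has to set the mode explicitly, assuming the generated CommitRequest_NON_TRANSACTIONAL constant and Enum() helper:

package sketch

import (
	pb "google.golang.org/cloud/internal/datastore"
)

// upsertCommit wraps a single upsert in a standalone (non-transactional)
// commit; the default mode is TRANSACTIONAL, so it must be overridden here.
func upsertCommit(e *pb.Entity) *pb.CommitRequest {
	return &pb.CommitRequest{
		Mode:     pb.CommitRequest_NON_TRANSACTIONAL.Enum(),
		Mutation: &pb.Mutation{Upsert: []*pb.Entity{e}},
	}
}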
+ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetKey() []*Key { + if m != nil { + return m.Key + } + return nil +} + +func init() { + proto.RegisterEnum("datastore.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) + proto.RegisterEnum("datastore.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) + proto.RegisterEnum("datastore.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) + proto.RegisterEnum("datastore.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) + proto.RegisterEnum("datastore.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) + proto.RegisterEnum("datastore.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) + proto.RegisterEnum("datastore.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) + proto.RegisterEnum("datastore.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) + proto.RegisterEnum("datastore.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto new file mode 100644 index 000000000..d752beaa5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto @@ -0,0 +1,606 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// The datastore v1 service proto definitions + +syntax = "proto2"; + +package datastore; +option java_package = "com.google.api.services.datastore"; + + +// An identifier for a particular subset of entities. +// +// Entities are partitioned into various subsets, each used by different +// datasets and different namespaces within a dataset and so forth. +// +// All input partition IDs are normalized before use. +// A partition ID is normalized as follows: +// If the partition ID is unset or is set to an empty partition ID, replace it +// with the context partition ID. +// Otherwise, if the partition ID has no dataset ID, assign it the context +// partition ID's dataset ID. +// Unless otherwise documented, the context partition ID has the dataset ID set +// to the context dataset ID and no other partition dimension set. +// +// A partition ID is empty if all of its fields are unset. +// +// Partition dimension: +// A dimension may be unset. 
+// A dimension's value must never be "". +// A dimension's value must match [A-Za-z\d\.\-_]{1,100} +// If the value of any dimension matches regex "__.*__", +// the partition is reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented contexts. +// +// Dataset ID: +// A dataset id's value must never be "". +// A dataset id's value must match +// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} +message PartitionId { + // The dataset ID. + optional string dataset_id = 3; + // The namespace. + optional string namespace = 4; +} + +// A unique identifier for an entity. +// If a key's partition id or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +message Key { + // Entities are partitioned into subsets, currently identified by a dataset + // (usually implicitly specified by the project) and namespace ID. + // Queries are scoped to a single partition. + optional PartitionId partition_id = 1; + + // A (kind, ID/name) pair used to construct a key path. + // + // At most one of name or ID may be set. + // If either is set, the element is complete. + // If neither is set, the element is incomplete. + message PathElement { + // The kind of the entity. + // A kind matching regex "__.*__" is reserved/read-only. + // A kind must not contain more than 500 characters. + // Cannot be "". + required string kind = 1; + // The ID of the entity. + // Never equal to zero. Values less than zero are discouraged and will not + // be supported in the future. + optional int64 id = 2; + // The name of the entity. + // A name matching regex "__.*__" is reserved/read-only. + // A name must not be more than 500 characters. + // Cannot be "". + optional string name = 3; + } + + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a root entity, the second element identifies + // a child of the root entity, the third element a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's ancestors. + // An entity path is always fully complete: ALL of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. A path can never + // be empty. + repeated PathElement path_element = 2; +} + +// A message that can hold any of the supported value types and associated +// metadata. +// +// At most one of the Value fields may be set. +// If none are set the value is "null". +// +message Value { + // A boolean value. + optional bool boolean_value = 1; + // An integer value. + optional int64 integer_value = 2; + // A double value. + optional double double_value = 3; + // A timestamp value. + optional int64 timestamp_microseconds_value = 4; + // A key value. + optional Key key_value = 5; + // A blob key value. + optional string blob_key_value = 16; + // A UTF-8 encoded string value. + optional string string_value = 17; + // A blob value. + optional bytes blob_value = 18; + // An entity value. + // May have no key. + // May have a key with an incomplete key path. + // May have a reserved/read-only key. + optional Entity entity_value = 6; + // A list value. 
+ // Cannot contain another list value. + // Cannot also have a meaning and indexing set. + repeated Value list_value = 7; + + // The meaning field is reserved and should not be used. + optional int32 meaning = 14; + + // If the value should be indexed. + // + // The indexed property may be set for a + // null value. + // When indexed is true, stringValue + // is limited to 500 characters and the blob value is limited to 500 bytes. + // Exception: If meaning is set to 2, string_value is limited to 2038 + // characters regardless of indexed. + // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 + // will be ignored on input (and will never be set on output). + // Input values by default have indexed set to + // true; however, you can explicitly set indexed to + // true if you want. (An output value never has + // indexed explicitly set to true.) If a value is + // itself an entity, it cannot have indexed set to + // true. + // Exception: An entity value with meaning 9, 20 or 21 may be indexed. + optional bool indexed = 15 [default = true]; +} + +// An entity property. +message Property { + // The name of the property. + // A property name matching regex "__.*__" is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // Cannot be "". + required string name = 1; + + // The value(s) of the property. + // Each value can have only one value property populated. For example, + // you cannot have a values list of { value: { integerValue: 22, + // stringValue: "a" } }, but you can have { value: { listValue: + // [ { integerValue: 22 }, { stringValue: "a" } ] }. + required Value value = 4; +} + +// An entity. +// +// An entity is limited to 1 megabyte when stored. That roughly +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +message Entity { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in Value.entityValue may have no key). + // An entity's kind is its key's path's last element's kind, + // or null if it has no key. + optional Key key = 1; + // The entity's properties. + // Each property's name must be unique for its entity. + repeated Property property = 2; +} + +// The result of fetching an entity from the datastore. +message EntityResult { + // Specifies what data the 'entity' field contains. + // A ResultType is either implied (for example, in LookupResponse.found it + // is always FULL) or specified by context (for example, in message + // QueryResultBatch, field 'entity_result_type' specifies a ResultType + // for all the values in field 'entity_result'). + enum ResultType { + FULL = 1; // The entire entity. + PROJECTION = 2; // A projected subset of properties. + // The entity may have no key. + // A property value may have meaning 18. + KEY_ONLY = 3; // Only the key. + } + + // The resulting entity. + required Entity entity = 1; +} + +// A query. +message Query { + // The projection to return. If not set the entire entity is returned. + repeated PropertyExpression projection = 2; + + // The kinds to query (if empty, returns entities from all kinds). + repeated KindExpression kind = 3; + + // The filter to apply (optional). + optional Filter filter = 4; + + // The order to apply to the query results (if empty, order is unspecified). + repeated PropertyOrder order = 5; + + // The properties to group by (if empty, no grouping is applied to the + // result set). 
+  repeated PropertyReference group_by = 6;
+
+  // A starting point for the query results. Optional. Query cursors are
+  // returned in query result batches.
+  optional bytes /* serialized QueryCursor */ start_cursor = 7;
+
+  // An ending point for the query results. Optional. Query cursors are
+  // returned in query result batches.
+  optional bytes /* serialized QueryCursor */ end_cursor = 8;
+
+  // The number of results to skip. Applies before limit, but after all other
+  // constraints (optional, defaults to 0).
+  optional int32 offset = 10 [default=0];
+
+  // The maximum number of results to return. Applies after all other
+  // constraints. Optional.
+  optional int32 limit = 11;
+}
+
+// A representation of a kind.
+message KindExpression {
+  // The name of the kind.
+  required string name = 1;
+}
+
+// A reference to a property relative to the kind expressions.
+message PropertyReference {
+  // The name of the property.
+  required string name = 2;
+}
+
+// A representation of a property in a projection.
+message PropertyExpression {
+  enum AggregationFunction {
+    FIRST = 1;
+  }
+  // The property to project.
+  required PropertyReference property = 1;
+  // The aggregation function to apply to the property. Optional.
+  // Can only be used when grouping by at least one property. Must
+  // then be set on all properties in the projection that are not
+  // being grouped by.
+  optional AggregationFunction aggregation_function = 2;
+}
+
+// The desired order for a specific property.
+message PropertyOrder {
+  enum Direction {
+    ASCENDING = 1;
+    DESCENDING = 2;
+  }
+  // The property to order by.
+  required PropertyReference property = 1;
+  // The direction to order by.
+  optional Direction direction = 2 [default=ASCENDING];
+}
+
+// A holder for any type of filter. Exactly one field should be specified.
+message Filter {
+  // A composite filter.
+  optional CompositeFilter composite_filter = 1;
+  // A filter on a property.
+  optional PropertyFilter property_filter = 2;
+}
+
+// A filter that merges multiple other filters using the given operator.
+message CompositeFilter {
+  enum Operator {
+    AND = 1;
+  }
+
+  // The operator for combining multiple filters.
+  required Operator operator = 1;
+  // The list of filters to combine.
+  // Must contain at least one filter.
+  repeated Filter filter = 2;
+}
+
+// A filter on a specific property.
+message PropertyFilter {
+  enum Operator {
+    LESS_THAN = 1;
+    LESS_THAN_OR_EQUAL = 2;
+    GREATER_THAN = 3;
+    GREATER_THAN_OR_EQUAL = 4;
+    EQUAL = 5;
+
+    HAS_ANCESTOR = 11;
+  }
+
+  // The property to filter by.
+  required PropertyReference property = 1;
+  // The operator to filter by.
+  required Operator operator = 2;
+  // The value to compare the property to.
+  required Value value = 3;
+}
+
+// A GQL query.
+message GqlQuery {
+  required string query_string = 1;
+  // When false, the query string must not contain a literal.
+  optional bool allow_literal = 2 [default = false];
+  // A named argument must set field GqlQueryArg.name.
+  // No two named arguments may have the same name.
+  // For each non-reserved named binding site in the query string,
+  // there must be a named argument with that name,
+  // but not necessarily the inverse.
+  repeated GqlQueryArg name_arg = 3;
+  // Numbered binding site @1 references the first numbered argument,
+  // effectively using 1-based indexing, rather than the usual 0.
+  // A numbered argument must NOT set field GqlQueryArg.name.
+ // For each binding site numbered i in query_string, + // there must be an ith numbered argument. + // The inverse must also be true. + repeated GqlQueryArg number_arg = 4; +} + +// A binding argument for a GQL query. +// Exactly one of fields value and cursor must be set. +message GqlQueryArg { + // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". + // Must not match regex "__.*__". + // Must not be "". + optional string name = 1; + optional Value value = 2; + optional bytes cursor = 3; +} + +// A batch of results produced by a query. +message QueryResultBatch { + // The possible values for the 'more_results' field. + enum MoreResultsType { + NOT_FINISHED = 1; // There are additional batches to fetch from this query. + MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more + // results after the limit. + NO_MORE_RESULTS = 3; // The query has been exhausted. + } + + // The result type for every entity in entityResults. + required EntityResult.ResultType entity_result_type = 1; + // The results for this batch. + repeated EntityResult entity_result = 2; + + // A cursor that points to the position after the last result in the batch. + // May be absent. + optional bytes /* serialized QueryCursor */ end_cursor = 4; + + // The state of the query after the current batch. + required MoreResultsType more_results = 5; + + // The number of results skipped because of Query.offset. + optional int32 skipped_results = 6; +} + +// A set of changes to apply. +// +// No entity in this message may have a reserved property name, +// not even a property in an entity in a value. +// No value in this message may have meaning 18, +// not even a value in an entity in another value. +// +// If entities with duplicate keys are present, an arbitrary choice will +// be made as to which is written. +message Mutation { + // Entities to upsert. + // Each upserted entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity upsert = 1; + // Entities to update. + // Each updated entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity update = 2; + // Entities to insert. + // Each inserted entity's key must have a complete path and + // must not be reserved/read-only. + repeated Entity insert = 3; + // Insert entities with a newly allocated ID. + // Each inserted entity's key must omit the final identifier in its path and + // must not be reserved/read-only. + repeated Entity insert_auto_id = 4; + // Keys of entities to delete. + // Each key must have a complete key path and must not be reserved/read-only. + repeated Key delete = 5; + // Ignore a user specified read-only period. Optional. + optional bool force = 6; +} + +// The result of applying a mutation. +message MutationResult { + // Number of index writes. + required int32 index_updates = 1; + // Keys for insertAutoId entities. One per entity from the + // request, in the same order. + repeated Key insert_auto_id_key = 2; +} + +// Options shared by read requests. +message ReadOptions { + enum ReadConsistency { + DEFAULT = 0; + STRONG = 1; + EVENTUAL = 2; + } + + // The read consistency to use. + // Cannot be set when transaction is set. + // Lookup and ancestor queries default to STRONG, global queries default to + // EVENTUAL and cannot be set to STRONG. + optional ReadConsistency read_consistency = 1 [default=DEFAULT]; + + // The transaction to use. Optional. + optional bytes /* serialized Transaction */ transaction = 2; +} + +// The request for Lookup. 
+message LookupRequest { + + // Options for this lookup request. Optional. + optional ReadOptions read_options = 1; + // Keys of entities to look up from the datastore. + repeated Key key = 3; +} + +// The response for Lookup. +message LookupResponse { + + // The order of results in these fields is undefined and has no relation to + // the order of the keys in the input. + + // Entities found as ResultType.FULL entities. + repeated EntityResult found = 1; + + // Entities not found as ResultType.KEY_ONLY entities. + repeated EntityResult missing = 2; + + // A list of keys that were not looked up due to resource constraints. + repeated Key deferred = 3; +} + + +// The request for RunQuery. +message RunQueryRequest { + + // The options for this query. + optional ReadOptions read_options = 1; + + // Entities are partitioned into subsets, identified by a dataset (usually + // implicitly specified by the project) and namespace ID. Queries are scoped + // to a single partition. + // This partition ID is normalized with the standard default context + // partition ID, but all other partition IDs in RunQueryRequest are + // normalized with this partition ID as the context partition ID. + optional PartitionId partition_id = 2; + + // The query to run. + // Either this field or field gql_query must be set, but not both. + optional Query query = 3; + // The GQL query to run. + // Either this field or field query must be set, but not both. + optional GqlQuery gql_query = 7; +} + +// The response for RunQuery. +message RunQueryResponse { + + // A batch of query results (always present). + optional QueryResultBatch batch = 1; + +} + +// The request for BeginTransaction. +message BeginTransactionRequest { + + enum IsolationLevel { + SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions + // conflict if their mutations conflict. For example: + // Read(A),Write(B) may not conflict with Read(B),Write(A), + // but Read(B),Write(B) does conflict with Read(B),Write(B). + SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent + // transactions conflict if they cannot be serialized. + // For example Read(A),Write(B) does conflict with + // Read(B),Write(A) but Read(A) may not conflict with + // Write(A). + } + + // The transaction isolation level. + optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; +} + +// The response for BeginTransaction. +message BeginTransactionResponse { + + // The transaction identifier (always present). + optional bytes /* serialized Transaction */ transaction = 1; +} + +// The request for Rollback. +message RollbackRequest { + + // The transaction identifier, returned by a call to + // beginTransaction. + required bytes /* serialized Transaction */ transaction = 1; +} + +// The response for Rollback. +message RollbackResponse { +// Empty +} + +// The request for Commit. +message CommitRequest { + + enum Mode { + TRANSACTIONAL = 1; + NON_TRANSACTIONAL = 2; + } + + // The transaction identifier, returned by a call to + // beginTransaction. Must be set when mode is TRANSACTIONAL. + optional bytes /* serialized Transaction */ transaction = 1; + // The mutation to perform. Optional. + optional Mutation mutation = 2; + // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. + optional Mode mode = 5 [default=TRANSACTIONAL]; +} + +// The response for Commit. +message CommitResponse { + + // The result of performing the mutation (if any). + optional MutationResult mutation_result = 1; +} + +// The request for AllocateIds. 
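Editor's sketch (not in the diff): the transactional flow implied by the messages above. The handle returned by BeginTransaction is opaque bytes that must be echoed back on Commit or Rollback; only the generated message types come from this diff.

package sketch

import (
	pb "google.golang.org/cloud/internal/datastore"
)

// inTransaction shows how the opaque transaction handle from
// BeginTransaction is echoed back on Commit and Rollback.
func inTransaction(begin *pb.BeginTransactionResponse, m *pb.Mutation) (*pb.CommitRequest, *pb.RollbackRequest) {
	tx := begin.GetTransaction()
	commit := &pb.CommitRequest{
		Transaction: tx, // Mode defaults to TRANSACTIONAL.
		Mutation:    m,
	}
	return commit, &pb.RollbackRequest{Transaction: tx}
}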
+message AllocateIdsRequest {
+
+  // A list of keys with incomplete key paths to allocate IDs for.
+  // No key may be reserved/read-only.
+  repeated Key key = 1;
+}
+
+// The response for AllocateIds.
+message AllocateIdsResponse {
+
+  // The keys specified in the request (in the same order), each with
+  // its key path completed with a newly allocated ID.
+  repeated Key key = 1;
+}
+
+// Each rpc normalizes the partition IDs of the keys in its input entities,
+// and always returns entities with keys with normalized partition IDs.
+// (Note that this applies to all entities, including entities in values.)
+service DatastoreService {
+  // Look up some entities by key.
+  rpc Lookup(LookupRequest) returns (LookupResponse) {
+  };
+  // Query for entities.
+  rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
+  };
+  // Begin a new transaction.
+  rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
+  };
+  // Commit a transaction, optionally creating, deleting or modifying some
+  // entities.
+  rpc Commit(CommitRequest) returns (CommitResponse) {
+  };
+  // Roll back a transaction.
+  rpc Rollback(RollbackRequest) returns (RollbackResponse) {
+  };
+  // Allocate IDs for incomplete keys (useful for referencing an entity before
+  // it is inserted).
+  rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
+  };
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/opts/option.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/opts/option.go new file mode 100644 index 000000000..c5ccf4f56 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/opts/option.go @@ -0,0 +1,24 @@
+// Package opts holds the DialOpt struct, configurable by
+// cloud.ClientOptions to set up transports for cloud packages.
+//
+// This is a separate package to prevent cycles between the core
+// cloud packages.
+package opts
+
+import (
+	"net/http"
+
+	"golang.org/x/oauth2"
+	"google.golang.org/grpc"
+)
+
+type DialOpt struct {
+	Endpoint  string
+	Scopes    []string
+	UserAgent string
+
+	TokenSource oauth2.TokenSource
+
+	HTTPClient *http.Client
+	GRPCClient *grpc.ClientConn
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go new file mode 100644 index 000000000..802eed51b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go @@ -0,0 +1,69 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testutil contains helper functions for writing tests.
+package testutil
+
+import (
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/cloud"
+)
+
+const (
+	envProjID     = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
+	envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
+)
+
+func ProjID() string {
+	projID := os.Getenv(envProjID)
+	if projID == "" {
+		log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
+	}
+	return projID
+}
+
+func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource {
+	key := os.Getenv(envPrivateKey)
+	if key == "" {
+		log.Fatal("GCLOUD_TESTS_GOLANG_KEY must be set. See CONTRIBUTING.md for details.")
+	}
+	jsonKey, err := ioutil.ReadFile(key)
+	if err != nil {
+		log.Fatalf("Cannot read the JSON key file, err: %v", err)
+	}
+	conf, err := google.JWTConfigFromJSON(jsonKey, scopes...)
+	if err != nil {
+		log.Fatalf("google.JWTConfigFromJSON: %v", err)
+	}
+	return conf.TokenSource(ctx)
+}
+
+// TODO(djd): Delete this function when it's no longer used.
+func Context(scopes ...string) context.Context {
+	ctx := oauth2.NoContext
+	ts := TokenSource(ctx, scopes...)
+	return cloud.NewContext(ProjID(), oauth2.NewClient(ctx, ts))
+}
+
+// TODO(djd): Delete this function when it's no longer used.
+func NoAuthContext() context.Context {
+	return cloud.NewContext(ProjID(), &http.Client{Transport: http.DefaultTransport})
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq.go new file mode 100644 index 000000000..ddae71cce --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq.go @@ -0,0 +1,29 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.5
+
+package transport
+
+import "net/http"
+
+// makeReqCancel returns a closure that cancels the given http.Request
+// when called.
+func makeReqCancel(req *http.Request) func(http.RoundTripper) {
+	c := make(chan struct{})
+	req.Cancel = c
+	return func(http.RoundTripper) {
+		close(c)
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go new file mode 100644 index 000000000..c11a4ddeb --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go @@ -0,0 +1,31 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.5
+
+package transport
+
+import "net/http"
+
+// makeReqCancel returns a closure that cancels the given http.Request
+// when called.
+func makeReqCancel(req *http.Request) func(http.RoundTripper) {
+	// Go 1.4 and prior do not have a reliable way of cancelling a request.
+	// Transport.CancelRequest will only work if the request is already in-flight.
+	return func(r http.RoundTripper) {
+		if t, ok := r.(*http.Transport); ok {
+			t.CancelRequest(req)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/dial.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/dial.go new file mode 100644 index 000000000..29624410d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/dial.go @@ -0,0 +1,134 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/cloud"
+	"google.golang.org/cloud/internal/opts"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/oauth"
+)
+
+// ErrHTTP is returned on a non-200 HTTP response.
+type ErrHTTP struct {
+	StatusCode int
+	Body       []byte
+	err        error
+}
+
+func (e *ErrHTTP) Error() string {
+	if e.err == nil {
+		return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body)
+	}
+	return e.err.Error()
+}
+
+// NewHTTPClient returns an HTTP client for communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opt ...cloud.ClientOption) (*http.Client, string, error) {
+	var o opts.DialOpt
+	for _, opt := range opt {
+		opt.Resolve(&o)
+	}
+	if o.GRPCClient != nil {
+		return nil, "", errors.New("unsupported GRPC base transport specified")
+	}
+	// TODO(djd): Wrap all http.Clients with appropriate internal version to add
+	// UserAgent header and prepend correct endpoint.
+	if o.HTTPClient != nil {
+		return o.HTTPClient, o.Endpoint, nil
+	}
+	if o.TokenSource == nil {
+		var err error
+		o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil { + return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err) + } + } + return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil +} + +// NewProtoClient returns a ProtoClient for communicating with a Google cloud service, +// configured with the given ClientOptions. +func NewProtoClient(ctx context.Context, opt ...cloud.ClientOption) (*ProtoClient, error) { + var o opts.DialOpt + for _, opt := range opt { + opt.Resolve(&o) + } + if o.GRPCClient != nil { + return nil, errors.New("unsupported GRPC base transport specified") + } + var client *http.Client + switch { + case o.HTTPClient != nil: + if o.TokenSource != nil { + return nil, errors.New("at most one of WithTokenSource or WithBaseHTTP may be provided") + } + client = o.HTTPClient + case o.TokenSource != nil: + client = oauth2.NewClient(ctx, o.TokenSource) + default: + var err error + client, err = google.DefaultClient(ctx, o.Scopes...) + if err != nil { + return nil, err + } + } + + return &ProtoClient{ + client: client, + endpoint: o.Endpoint, + userAgent: o.UserAgent, + }, nil +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opt ...cloud.ClientOption) (*grpc.ClientConn, error) { + var o opts.DialOpt + for _, opt := range opt { + opt.Resolve(&o) + } + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP base transport specified") + } + if o.GRPCClient != nil { + return o.GRPCClient, nil + } + if o.TokenSource == nil { + var err error + o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...) + if err != nil { + return nil, fmt.Errorf("google.DefaultTokenSource: %v", err) + } + } + grpcOpts := []grpc.DialOption{ + grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + return grpc.Dial(o.Endpoint, grpcOpts...) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/proto.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/proto.go new file mode 100644 index 000000000..05b11cde1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/transport/proto.go @@ -0,0 +1,80 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
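A usage sketch (editor-added, not part of the vendored files) for the dial helpers above, assuming placeholder endpoints and user agent; without a WithTokenSource option both paths fall back to Application Default Credentials. Note that the transport package is internal to google.golang.org/cloud, so importing it directly is for illustration only.

package sketch

import (
	"golang.org/x/net/context"

	"google.golang.org/cloud"
	"google.golang.org/cloud/internal/transport"
)

// dialBoth exercises both dial paths with placeholder endpoints.
func dialBoth(ctx context.Context) error {
	httpClient, endpoint, err := transport.NewHTTPClient(ctx,
		cloud.WithEndpoint("https://service.example.googleapis.com/"),
		cloud.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
	)
	if err != nil {
		return err
	}
	_, _ = httpClient, endpoint

	// GRPC transport: same option mechanism, different base transport.
	conn, err := transport.DialGRPC(ctx,
		cloud.WithEndpoint("service.example.googleapis.com:443"),
		cloud.WithUserAgent("sketch/0.1"),
	)
	if err != nil {
		return err
	}
	return conn.Close()
}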
+
+package transport
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+)
+
+type ProtoClient struct {
+	client    *http.Client
+	endpoint  string
+	userAgent string
+}
+
+func (c *ProtoClient) Call(ctx context.Context, method string, req, resp proto.Message) error {
+	payload, err := proto.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	httpReq, err := http.NewRequest("POST", c.endpoint+method, bytes.NewReader(payload))
+	if err != nil {
+		return err
+	}
+	httpReq.Header.Set("Content-Type", "application/x-protobuf")
+	if ua := c.userAgent; ua != "" {
+		httpReq.Header.Set("User-Agent", ua)
+	}
+
+	errc := make(chan error, 1)
+	cancel := makeReqCancel(httpReq)
+
+	go func() {
+		r, err := c.client.Do(httpReq)
+		if err != nil {
+			errc <- err
+			return
+		}
+		defer r.Body.Close()
+
+		body, err := ioutil.ReadAll(r.Body)
+		if r.StatusCode != http.StatusOK {
+			err = &ErrHTTP{
+				StatusCode: r.StatusCode,
+				Body:       body,
+				err:        err,
+			}
+		}
+		if err != nil {
+			errc <- err
+			return
+		}
+		errc <- proto.Unmarshal(body, resp)
+	}()
+
+	select {
+	case <-ctx.Done():
+		cancel(c.client.Transport) // Cancel the HTTP request.
+		return ctx.Err()
+	case err := <-errc:
+		return err
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/key.json.enc b/Godeps/_workspace/src/google.golang.org/cloud/key.json.enc new file mode 100644 index 000000000..2f673a84b Binary files /dev/null and b/Godeps/_workspace/src/google.golang.org/cloud/key.json.enc differ
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/option.go b/Godeps/_workspace/src/google.golang.org/cloud/option.go new file mode 100644 index 000000000..d4a5aea29 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/option.go @@ -0,0 +1,100 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cloud
+
+import (
+	"net/http"
+
+	"golang.org/x/oauth2"
+	"google.golang.org/cloud/internal/opts"
+	"google.golang.org/grpc"
+)
+
+// ClientOption is used when constructing clients for each cloud service.
+type ClientOption interface {
+	// Resolve configures the given DialOpt for this option.
+	Resolve(*opts.DialOpt)
+}
+
+// WithTokenSource returns a ClientOption that specifies an OAuth2 token
+// source to be used as the basis for authentication.
+func WithTokenSource(s oauth2.TokenSource) ClientOption {
+	return withTokenSource{s}
+}
+
+type withTokenSource struct{ ts oauth2.TokenSource }
+
+func (w withTokenSource) Resolve(o *opts.DialOpt) {
+	o.TokenSource = w.ts
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+	return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Resolve(o *opts.DialOpt) {
+	o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
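Editor's sketch (not from the diff): ProtoClient.Call above honors context cancellation through the makeReqCancel shim shown earlier; if the deadline fires first, the in-flight HTTP request is cancelled and ctx.Err() is returned. The "lookup" method path and message types here are placeholders for illustration.

package sketch

import (
	"time"

	"golang.org/x/net/context"
	pb "google.golang.org/cloud/internal/datastore"
	"google.golang.org/cloud/internal/transport"
)

// lookupWithTimeout bounds a proto-over-HTTP call with a 5s deadline.
func lookupWithTimeout(pc *transport.ProtoClient, req *pb.LookupRequest) (*pb.LookupResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp := &pb.LookupResponse{}
	// "lookup" is a hypothetical method path appended to the client's endpoint.
	if err := pc.Call(ctx, "lookup", req, resp); err != nil {
		return nil, err
	}
	return resp, nil
}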
+func WithScopes(scope ...string) ClientOption {
+	return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Resolve(o *opts.DialOpt) {
+	o.Scopes = []string(w)
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+	return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) }
+
+// WithBaseHTTP returns a ClientOption that specifies the HTTP client to
+// use as the basis of communications. This option may only be used with
+// services that support HTTP as their communication transport.
+func WithBaseHTTP(client *http.Client) ClientOption {
+	return withBaseHTTP{client}
+}
+
+type withBaseHTTP struct{ client *http.Client }
+
+func (w withBaseHTTP) Resolve(o *opts.DialOpt) {
+	o.HTTPClient = w.client
+}
+
+// WithBaseGRPC returns a ClientOption that specifies the GRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support GRPC as their communication transport.
+func WithBaseGRPC(client *grpc.ClientConn) ClientOption {
+	return withBaseGRPC{client}
+}
+
+type withBaseGRPC struct{ client *grpc.ClientConn }
+
+func (w withBaseGRPC) Resolve(o *opts.DialOpt) {
+	o.GRPCClient = w.client
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/example_test.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/example_test.go new file mode 100644 index 000000000..939ca1823 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/example_test.go @@ -0,0 +1,80 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub_test
+
+import (
+	"io/ioutil"
+	"log"
+	"testing"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/cloud"
+	"google.golang.org/cloud/pubsub"
+)
+
+// TODO(jbd): Remove after Go 1.4.
+// Related to https://codereview.appspot.com/107320046
+func TestA(t *testing.T) {}
+
+func Example_auth() context.Context {
+	// Initialize an authorized context with Google Developers Console
+	// JSON key. Read the google package examples to learn more about
+	// different authorization flows you can use.
+	// http://godoc.org/golang.org/x/oauth2/google
+	jsonKey, err := ioutil.ReadFile("/path/to/json/keyfile.json")
+	if err != nil {
+		log.Fatal(err)
+	}
+	conf, err := google.JWTConfigFromJSON(
+		jsonKey,
+		pubsub.ScopeCloudPlatform,
+		pubsub.ScopePubSub,
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	ctx := cloud.NewContext("project-id", conf.Client(oauth2.NoContext))
+	// See the other samples to learn how to use the context.
+ return ctx +} + +func ExamplePublish() { + ctx := Example_auth() + + msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ + Data: []byte("hello world"), + }) + if err != nil { + log.Fatal(err) + } + log.Printf("Published a message with a message id: %s\n", msgIDs[0]) +} + +func ExamplePull() { + ctx := Example_auth() + + // E.g. c.CreateSub("sub1", "topic1", time.Duration(0), "") + msgs, err := pubsub.Pull(ctx, "sub1", 1) + if err != nil { + log.Fatal(err) + } + log.Printf("New message arrived: %v\n", msgs[0]) + if err := pubsub.Ack(ctx, "sub1", msgs[0].AckID); err != nil { + log.Fatal(err) + } + log.Println("Acknowledged message") +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/integration_test.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/integration_test.go new file mode 100644 index 000000000..9ce68af9d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/integration_test.go @@ -0,0 +1,144 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build integration + +package pubsub + +import ( + "fmt" + "testing" + "time" + + "google.golang.org/cloud/internal/testutil" +) + +func TestAll(t *testing.T) { + ctx := testutil.Context(ScopePubSub, ScopeCloudPlatform) + now := time.Now() + topic := fmt.Sprintf("topic-%d", now.Unix()) + subscription := fmt.Sprintf("subscription-%d", now.Unix()) + + if err := CreateTopic(ctx, topic); err != nil { + t.Errorf("CreateTopic error: %v", err) + } + + if err := CreateSub(ctx, subscription, topic, time.Duration(0), ""); err != nil { + t.Errorf("CreateSub error: %v", err) + } + + exists, err := TopicExists(ctx, topic) + if err != nil { + t.Fatalf("TopicExists error: %v", err) + } + if !exists { + t.Errorf("topic %s should exist, but it doesn't", topic) + } + + exists, err = SubExists(ctx, subscription) + if err != nil { + t.Fatalf("SubExists error: %v", err) + } + if !exists { + t.Errorf("subscription %s should exist, but it doesn't", subscription) + } + + max := 10 + msgs := make([]*Message, max) + expectedMsgs := make(map[string]bool, max) + for i := 0; i < max; i++ { + text := fmt.Sprintf("a message with an index %d", i) + attrs := make(map[string]string) + attrs["foo"] = "bar" + msgs[i] = &Message{ + Data: []byte(text), + Attributes: attrs, + } + expectedMsgs[text] = false + } + + ids, err := Publish(ctx, topic, msgs...) 
+	if err != nil {
+		t.Fatalf("Publish (1) error: %v", err)
+	}
+
+	if len(ids) != max {
+		t.Errorf("unexpected number of message IDs received; %d, want %d", len(ids), max)
+	}
+
+	expectedIDs := make(map[string]bool, max)
+	for _, id := range ids {
+		expectedIDs[id] = false
+	}
+
+	received, err := PullWait(ctx, subscription, max)
+	if err != nil {
+		t.Fatalf("PullWait error: %v", err)
+	}
+
+	if len(received) != max {
+		t.Errorf("unexpected number of messages received; %d, want %d", len(received), max)
+	}
+
+	for _, msg := range received {
+		expectedMsgs[string(msg.Data)] = true
+		expectedIDs[msg.ID] = true
+		if msg.Attributes["foo"] != "bar" {
+			t.Errorf("message attribute foo is expected to be 'bar', found '%s'", msg.Attributes["foo"])
+		}
+	}
+
+	for msg, found := range expectedMsgs {
+		if !found {
+			t.Errorf("message '%s' should be received", msg)
+		}
+	}
+
+	for id, found := range expectedIDs {
+		if !found {
+			t.Errorf("message with the message id '%s' should be received", id)
		}
+	}
+
+	// base64 test
+	data := "=@~"
+	msg := &Message{
+		Data: []byte(data),
+	}
+	_, err = Publish(ctx, topic, msg)
+	if err != nil {
+		t.Fatalf("Publish (2) error: %v", err)
+	}
+
+	received, err = PullWait(ctx, subscription, 1)
+	if err != nil {
+		t.Fatalf("PullWait error: %v", err)
+	}
+	if len(received) != 1 {
+		t.Fatalf("unexpected number of messages received; %d, want %d", len(received), 1)
+	}
+	if string(received[0].Data) != data {
+		t.Errorf("unexpected message received; %s, want %s", string(received[0].Data), data)
+	}
+
+	err = DeleteSub(ctx, subscription)
+	if err != nil {
+		t.Errorf("DeleteSub error: %v", err)
+	}
+
+	err = DeleteTopic(ctx, topic)
+	if err != nil {
+		t.Errorf("DeleteTopic error: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub.go new file mode 100644 index 000000000..eb79f4906 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub.go @@ -0,0 +1,284 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pubsub contains a Google Cloud Pub/Sub client.
+//
+// This package is experimental and may make backwards-incompatible changes.
+//
+// More information about Google Cloud Pub/Sub is available at
+// https://cloud.google.com/pubsub/docs
+package pubsub
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"google.golang.org/cloud/internal"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/pubsub/v1beta2"
+)
+
+const (
+	// ScopePubSub grants permissions to view and manage Pub/Sub
+	// topics and subscriptions.
+	ScopePubSub = "https://www.googleapis.com/auth/pubsub"
+
+	// ScopeCloudPlatform grants permissions to view and manage your data
+	// across Google Cloud Platform services.
+	ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
+)
+
+// batchLimit is the maximum size of a single batch.
+const batchLimit = 1000
+
+// Message represents a Pub/Sub message.
+type Message struct {
+	// ID identifies this message.
+	ID string
+
+	// AckID is the identifier to acknowledge this message.
+	AckID string
+
+	// Data is the actual data in the message.
+	Data []byte
+
+	// Attributes represents the key-value pairs the current message
+	// is labelled with.
+	Attributes map[string]string
+}
+
+// TODO(jbd): Add subscription and topic listing.
+
+// CreateSub creates a Pub/Sub subscription on the backend.
+// A subscription should subscribe to an existing topic.
+//
+// Messages that haven't been acknowledged will be pushed back to the
+// subscription again when the default acknowledgement deadline is
+// reached. You can override the default deadline by providing a
+// non-zero deadline. The deadline must not be specified to a
+// precision greater than one second.
+//
+// As new messages are queued on the subscription, you
+// may receive push notifications about the new arrivals.
+// To receive notifications of new messages in the queue,
+// specify an endpoint callback URL.
+// If endpoint is an empty string the backend will not notify the
+// client of new messages.
+//
+// If the subscription already exists, an error will be returned.
+func CreateSub(ctx context.Context, name string, topic string, deadline time.Duration, endpoint string) error {
+	sub := &raw.Subscription{
+		Topic: fullTopicName(internal.ProjID(ctx), topic),
+	}
+	if int64(deadline) > 0 {
+		if !isSec(deadline) {
+			return errors.New("pubsub: deadline must not be specified to precision greater than one second")
+		}
+		sub.AckDeadlineSeconds = int64(deadline / time.Second)
+	}
+	if endpoint != "" {
+		sub.PushConfig = &raw.PushConfig{PushEndpoint: endpoint}
+	}
+	_, err := rawService(ctx).Projects.Subscriptions.Create(fullSubName(internal.ProjID(ctx), name), sub).Do()
+	return err
+}
+
+// DeleteSub deletes the subscription.
+func DeleteSub(ctx context.Context, name string) error {
+	_, err := rawService(ctx).Projects.Subscriptions.Delete(fullSubName(internal.ProjID(ctx), name)).Do()
+	return err
+}
+
+// ModifyAckDeadline modifies the acknowledgement deadline
+// for the messages retrieved from the specified subscription.
+// The deadline must not be specified to a precision greater than one second.
+func ModifyAckDeadline(ctx context.Context, sub string, id string, deadline time.Duration) error {
+	if !isSec(deadline) {
+		return errors.New("pubsub: deadline must not be specified to precision greater than one second")
+	}
+	_, err := rawService(ctx).Projects.Subscriptions.ModifyAckDeadline(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyAckDeadlineRequest{
+		AckDeadlineSeconds: int64(deadline / time.Second),
+		AckId:              id,
+	}).Do()
+	return err
+}
+
+// ModifyPushEndpoint modifies the URL endpoint that handles push
+// notifications coming from the Pub/Sub backend
+// for the specified subscription.
+func ModifyPushEndpoint(ctx context.Context, sub, endpoint string) error {
+	_, err := rawService(ctx).Projects.Subscriptions.ModifyPushConfig(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyPushConfigRequest{
+		PushConfig: &raw.PushConfig{
+			PushEndpoint: endpoint,
+		},
+	}).Do()
+	return err
+}
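+
+// An added caller-side sketch of extending an acknowledgement deadline;
+// "sub1" is a placeholder subscription name and ackID comes from a
+// previously pulled message. Durations with sub-second precision are
+// rejected by both CreateSub and ModifyAckDeadline:
+//
+//	err := pubsub.ModifyAckDeadline(ctx, "sub1", ackID, 2*time.Minute)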
+
+// SubExists returns true if the subscription exists.
+func SubExists(ctx context.Context, name string) (bool, error) {
+	_, err := rawService(ctx).Projects.Subscriptions.Get(fullSubName(internal.ProjID(ctx), name)).Do()
+	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+// Ack acknowledges one or more Pub/Sub messages on the
+// specified subscription.
+func Ack(ctx context.Context, sub string, id ...string) error {
+	for idx, ackID := range id {
+		if ackID == "" {
+			return fmt.Errorf("pubsub: empty ackID detected at index %d", idx)
+		}
+	}
+	_, err := rawService(ctx).Projects.Subscriptions.Acknowledge(fullSubName(internal.ProjID(ctx), sub), &raw.AcknowledgeRequest{
+		AckIds: id,
+	}).Do()
+	return err
+}
+
+func toMessage(resp *raw.ReceivedMessage) (*Message, error) {
+	if resp.Message == nil {
+		return &Message{AckID: resp.AckId}, nil
+	}
+	data, err := base64.StdEncoding.DecodeString(resp.Message.Data)
+	if err != nil {
+		return nil, err
+	}
+	return &Message{
+		AckID:      resp.AckId,
+		Data:       data,
+		Attributes: resp.Message.Attributes,
+		ID:         resp.Message.MessageId,
+	}, nil
+}
+
+// Pull pulls messages from the subscription. It returns up to n
+// messages; n must not be larger than 1000 (the batch limit).
+func Pull(ctx context.Context, sub string, n int) ([]*Message, error) {
+	return pull(ctx, sub, n, true)
+}
+
+// PullWait pulls messages from the subscription. If there are not
+// enough messages left in the subscription queue, it blocks until
+// at least n messages arrive or the timeout occurs; n must not be
+// larger than 1000 (the batch limit).
+func PullWait(ctx context.Context, sub string, n int) ([]*Message, error) {
+	return pull(ctx, sub, n, false)
+}
+
+func pull(ctx context.Context, sub string, n int, retImmediately bool) ([]*Message, error) {
+	if n < 1 || n > batchLimit {
+		return nil, fmt.Errorf("pubsub: cannot pull fewer than one or more than %d messages, but %d was given", batchLimit, n)
+	}
+	resp, err := rawService(ctx).Projects.Subscriptions.Pull(fullSubName(internal.ProjID(ctx), sub), &raw.PullRequest{
+		ReturnImmediately: retImmediately,
+		MaxMessages:       int64(n),
+	}).Do()
+	if err != nil {
+		return nil, err
+	}
+	msgs := make([]*Message, len(resp.ReceivedMessages))
+	for i := 0; i < len(resp.ReceivedMessages); i++ {
+		msg, err := toMessage(resp.ReceivedMessages[i])
+		if err != nil {
+			return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, PullResponse: %+v", i, resp.ReceivedMessages[i])
+		}
+		msgs[i] = msg
+	}
+	return msgs, nil
+}
+
+// CreateTopic creates a new topic with the specified name on the backend.
+// It will return an error if the topic already exists.
+func CreateTopic(ctx context.Context, name string) error {
+	_, err := rawService(ctx).Projects.Topics.Create(fullTopicName(internal.ProjID(ctx), name), &raw.Topic{}).Do()
+	return err
+}
+
+// DeleteTopic deletes the specified topic.
+func DeleteTopic(ctx context.Context, name string) error {
+	_, err := rawService(ctx).Projects.Topics.Delete(fullTopicName(internal.ProjID(ctx), name)).Do()
+	return err
+}
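+
+// An added caller-side consume-loop sketch using PullWait and Ack
+// ("sub1" and process are placeholders; real code should also stop on
+// context cancellation and back off on repeated errors):
+//
+//	for {
+//		msgs, err := pubsub.PullWait(ctx, "sub1", 100)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, m := range msgs {
+//			process(m.Data)
+//			if err := pubsub.Ack(ctx, "sub1", m.AckID); err != nil {
+//				log.Fatal(err)
+//			}
+//		}
+//	}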
+
+// TopicExists returns true if a topic exists with the specified name.
+func TopicExists(ctx context.Context, name string) (bool, error) {
+	_, err := rawService(ctx).Projects.Topics.Get(fullTopicName(internal.ProjID(ctx), name)).Do()
+	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+// Publish publishes messages to the topic's subscribers. It returns
+// message IDs upon success.
+func Publish(ctx context.Context, topic string, msgs ...*Message) ([]string, error) {
+	var rawMsgs []*raw.PubsubMessage
+	if len(msgs) == 0 {
+		return nil, errors.New("pubsub: no messages to publish")
+	}
+	if len(msgs) > batchLimit {
+		return nil, fmt.Errorf("pubsub: %d messages given, but maximum batch size is %d", len(msgs), batchLimit)
+	}
+	rawMsgs = make([]*raw.PubsubMessage, len(msgs))
+	for i, msg := range msgs {
+		rawMsgs[i] = &raw.PubsubMessage{
+			Data:       base64.StdEncoding.EncodeToString(msg.Data),
+			Attributes: msg.Attributes,
+		}
+	}
+	resp, err := rawService(ctx).Projects.Topics.Publish(fullTopicName(internal.ProjID(ctx), topic), &raw.PublishRequest{
+		Messages: rawMsgs,
+	}).Do()
+	if err != nil {
+		return nil, err
+	}
+	return resp.MessageIds, nil
+}
+
+// fullSubName returns the fully qualified name for a subscription.
+// E.g. projects/project-id/subscriptions/subscription-name.
+func fullSubName(proj, name string) string {
+	return fmt.Sprintf("projects/%s/subscriptions/%s", proj, name)
+}
+
+// fullTopicName returns the fully qualified name for a topic.
+// E.g. projects/project-id/topics/topic-name.
+func fullTopicName(proj, name string) string {
+	return fmt.Sprintf("projects/%s/topics/%s", proj, name)
+}
+
+func isSec(dur time.Duration) bool {
+	return dur%time.Second == 0
+}
+
+func rawService(ctx context.Context) *raw.Service {
+	return internal.Service(ctx, "pubsub", func(hc *http.Client) interface{} {
+		svc, _ := raw.New(hc)
+		return svc
+	}).(*raw.Service)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub_test.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub_test.go
new file mode 100644
index 000000000..df73f11f2
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub_test.go
@@ -0,0 +1,49 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+
+	"google.golang.org/cloud"
+)
+
+func TestIsSec(t *testing.T) {
+	tests := map[time.Duration]bool{
+		time.Second:                    true,
+		5 * time.Second:                true,
+		time.Hour:                      true,
+		time.Millisecond:               false,
+		time.Second + time.Microsecond: false,
+	}
+	for dur, expected := range tests {
+		if isSec(dur) != expected {
+			t.Errorf("isSec(%v) = %v, want %v", dur, isSec(dur), expected)
+		}
+	}
+}
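+
+// TestFullNames is an added check of the fully qualified resource-name
+// helpers; the expected values follow the "projects/<project>/..." format
+// that the v1beta2 API uses.
+func TestFullNames(t *testing.T) {
+	if got, want := fullSubName("proj", "sub"), "projects/proj/subscriptions/sub"; got != want {
+		t.Errorf("fullSubName = %q; want %q", got, want)
+	}
+	if got, want := fullTopicName("proj", "topic"), "projects/proj/topics/topic"; got != want {
+		t.Errorf("fullTopicName = %q; want %q", got, want)
+	}
+}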
+
+func TestEmptyAckID(t *testing.T) {
+	ctx := cloud.NewContext("project-id", &http.Client{})
+	id := []string{"test", ""}
+	err := Ack(ctx, "sub", id...)
+
+	if err == nil || !strings.Contains(err.Error(), "index 1") {
+		t.Errorf("Ack should report an error indicating the id is empty. Got: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/acl.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/acl.go
new file mode 100644
index 000000000..9ae5b04e0
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/acl.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+
+	"golang.org/x/net/context"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// ACLRole is the access permission for the entity.
+type ACLRole string
+
+const (
+	RoleOwner  ACLRole = "OWNER"
+	RoleReader ACLRole = "READER"
+)
+
+// ACLEntity is an entity holding an ACL permission.
+//
+// It could be in the form of:
+// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
+// "domain-<domain>" and "project-team-<projectId>".
+//
+// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
+type ACLEntity string
+
+const (
+	AllUsers              ACLEntity = "allUsers"
+	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
+)
+
+// ACLRule represents an access control list rule entry for a Google Cloud Storage object or bucket.
+// A bucket is a Google Cloud Storage container whose name is globally unique and contains zero or
+// more objects. An object is a blob of data that is stored in a bucket.
+type ACLRule struct {
+	// Entity identifies the entity holding the current rule's permissions.
+	Entity ACLEntity
+
+	// Role is the access permission for the entity.
+	Role ACLRole
+}
+
+// DefaultACL returns the default object ACL entries for the named bucket.
+func DefaultACL(ctx context.Context, bucket string) ([]ACLRule, error) {
+	acls, err := rawService(ctx).DefaultObjectAccessControls.List(bucket).Do()
+	if err != nil {
+		return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", bucket, err)
+	}
+	r := make([]ACLRule, 0, len(acls.Items))
+	for _, v := range acls.Items {
+		if m, ok := v.(map[string]interface{}); ok {
+			entity, ok1 := m["entity"].(string)
+			role, ok2 := m["role"].(string)
+			if ok1 && ok2 {
+				r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
+			}
+		}
+	}
+	return r, nil
+}
+
+// PutDefaultACLRule saves the named default object ACL entity with the provided role for the named bucket.
+func PutDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error {
+	acl := &raw.ObjectAccessControl{
+		Bucket: bucket,
+		Entity: string(entity),
+		Role:   string(role),
+	}
+	_, err := rawService(ctx).DefaultObjectAccessControls.Update(bucket, string(entity), acl).Do()
+	if err != nil {
+		return fmt.Errorf("storage: error updating default ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
+	}
+	return nil
+}
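+
+// For illustration, an added caller-side sketch granting read access on
+// newly created objects to all users in a domain via the default object
+// ACL (the bucket and domain names are placeholders):
+//
+//	err := storage.PutDefaultACLRule(ctx, "bucketname", "domain-example.com", storage.RoleReader)
+
+// DeleteDefaultACLRule deletes the named default ACL entity for the named bucket.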
+func DeleteDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity) error { + err := rawService(ctx).DefaultObjectAccessControls.Delete(bucket, string(entity)).Do() + if err != nil { + return fmt.Errorf("storage: error deleting default ACL rule for bucket %q, entity %q: %v", bucket, entity, err) + } + return nil +} + +// BucketACL returns the ACL entries for the named bucket. +func BucketACL(ctx context.Context, bucket string) ([]ACLRule, error) { + acls, err := rawService(ctx).BucketAccessControls.List(bucket).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", bucket, err) + } + r := make([]ACLRule, len(acls.Items)) + for i, v := range acls.Items { + r[i].Entity = ACLEntity(v.Entity) + r[i].Role = ACLRole(v.Role) + } + return r, nil +} + +// PutBucketACLRule saves the named ACL entity with the provided role for the named bucket. +func PutBucketACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + _, err := rawService(ctx).BucketAccessControls.Update(bucket, string(entity), acl).Do() + if err != nil { + return fmt.Errorf("storage: error updating bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) + } + return nil +} + +// DeleteBucketACLRule deletes the named ACL entity for the named bucket. +func DeleteBucketACLRule(ctx context.Context, bucket string, entity ACLEntity) error { + err := rawService(ctx).BucketAccessControls.Delete(bucket, string(entity)).Do() + if err != nil { + return fmt.Errorf("storage: error deleting bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) + } + return nil +} + +// ACL returns the ACL entries for the named object. +func ACL(ctx context.Context, bucket, object string) ([]ACLRule, error) { + acls, err := rawService(ctx).ObjectAccessControls.List(bucket, object).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", bucket, object, err) + } + r := make([]ACLRule, 0, len(acls.Items)) + for _, v := range acls.Items { + if m, ok := v.(map[string]interface{}); ok { + entity, ok1 := m["entity"].(string) + role, ok2 := m["role"].(string) + if ok1 && ok2 { + r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) + } + } + } + return r, nil +} + +// PutACLRule saves the named ACL entity with the provided role for the named object. +func PutACLRule(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + _, err := rawService(ctx).ObjectAccessControls.Update(bucket, object, string(entity), acl).Do() + if err != nil { + return fmt.Errorf("storage: error updating object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) + } + return nil +} + +// DeleteACLRule deletes the named ACL entity for the named object. 
+func DeleteACLRule(ctx context.Context, bucket, object string, entity ACLEntity) error { + err := rawService(ctx).ObjectAccessControls.Delete(bucket, object, string(entity)).Do() + if err != nil { + return fmt.Errorf("storage: error deleting object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) + } + return nil +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/example_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/example_test.go new file mode 100644 index 000000000..38c543021 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/example_test.go @@ -0,0 +1,150 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage_test + +import ( + "io/ioutil" + "log" + "testing" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/cloud" + "google.golang.org/cloud/storage" +) + +// TODO(jbd): Remove after Go 1.4. +// Related to https://codereview.appspot.com/107320046 +func TestA(t *testing.T) {} + +func Example_auth() context.Context { + // Initialize an authorized context with Google Developers Console + // JSON key. Read the google package examples to learn more about + // different authorization flows you can use. + // http://godoc.org/golang.org/x/oauth2/google + jsonKey, err := ioutil.ReadFile("/path/to/json/keyfile.json") + if err != nil { + log.Fatal(err) + } + conf, err := google.JWTConfigFromJSON( + jsonKey, + storage.ScopeFullControl, + ) + if err != nil { + log.Fatal(err) + } + ctx := cloud.NewContext("project-id", conf.Client(oauth2.NoContext)) + // Use the context (see other examples) + return ctx +} + +func ExampleListObjects() { + ctx := Example_auth() + + var query *storage.Query + for { + // If you are using this package on App Engine Managed VMs runtime, + // you can init a bucket client with your app's default bucket name. + // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. + objects, err := storage.ListObjects(ctx, "bucketname", query) + if err != nil { + log.Fatal(err) + } + for _, obj := range objects.Results { + log.Printf("object name: %s, size: %v", obj.Name, obj.Size) + } + // if there are more results, objects.Next + // will be non-nil. 
+ query = objects.Next + if query == nil { + break + } + } + + log.Println("paginated through all object items in the bucket you specified.") +} + +func ExampleNewReader() { + ctx := Example_auth() + + rc, err := storage.NewReader(ctx, "bucketname", "filename1") + if err != nil { + log.Fatal(err) + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + log.Fatal(err) + } + + log.Println("file contents:", slurp) +} + +func ExampleNewWriter() { + ctx := Example_auth() + + wc := storage.NewWriter(ctx, "bucketname", "filename1") + wc.ContentType = "text/plain" + wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}} + if _, err := wc.Write([]byte("hello world")); err != nil { + log.Fatal(err) + } + if err := wc.Close(); err != nil { + log.Fatal(err) + } + log.Println("updated object:", wc.Object()) +} + +func ExampleCopyObject() { + ctx := Example_auth() + + o, err := storage.CopyObject(ctx, "bucketname", "file1", "another-bucketname", "file2", nil) + if err != nil { + log.Fatal(err) + } + log.Println("copied file:", o) +} + +func ExampleDeleteObject() { + // To delete multiple objects in a bucket, first ListObjects then delete them. + ctx := Example_auth() + + // If you are using this package on App Engine Managed VMs runtime, + // you can init a bucket client with your app's default bucket name. + // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. + const bucket = "bucketname" + + var query *storage.Query // Set up query as desired. + for { + objects, err := storage.ListObjects(ctx, bucket, query) + if err != nil { + log.Fatal(err) + } + for _, obj := range objects.Results { + log.Printf("deleting object name: %q, size: %v", obj.Name, obj.Size) + if err := storage.DeleteObject(ctx, bucket, obj.Name); err != nil { + log.Fatalf("unable to delete %q: %v", obj.Name, err) + } + } + // if there are more results, objects.Next will be non-nil. + query = objects.Next + if query == nil { + break + } + } + + log.Println("deleted all object items in the bucket you specified.") +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/integration_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/integration_test.go new file mode 100644 index 000000000..9c691964f --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/integration_test.go @@ -0,0 +1,327 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build integration + +package storage + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "testing" + "time" + + "google.golang.org/cloud/internal" + "google.golang.org/cloud/internal/testutil" +) + +var ( + bucket string + contents = make(map[string][]byte) + objects = []string{"obj1", "obj2", "obj/with/slashes"} + aclObjects = []string{"acl1", "acl2"} + copyObj = "copy-object" +) + +const envBucket = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + +func TestObjects(t *testing.T) { + ctx := testutil.Context(ScopeFullControl) + bucket = os.Getenv(envBucket) + + // Cleanup. + cleanup(t, "obj") + + const defaultType = "text/plain" + + // Test Writer. + for _, obj := range objects { + t.Logf("Writing %v", obj) + wc := NewWriter(ctx, bucket, obj) + wc.ContentType = defaultType + c := randomContents() + if _, err := wc.Write(c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + if err := wc.Close(); err != nil { + t.Errorf("Close for %v failed with %v", obj, err) + } + contents[obj] = c + } + + // Test Reader. + for _, obj := range objects { + t.Logf("Creating a reader to read %v", obj) + rc, err := NewReader(ctx, bucket, obj) + if err != nil { + t.Errorf("Can't create a reader for %v, errored with %v", obj, err) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("Can't ReadAll object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%v) = %q; want %q", obj, got, want) + } + rc.Close() + + // Test SignedURL + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + } + u, err := SignedURL(bucket, obj, opts) + if err != nil { + t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err) + } + hc := internal.HTTPClient(ctx) + res, err := hc.Get(u) + if err != nil { + t.Fatalf("Can't get URL %q: %v", u, err) + } + slurp, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%v) = %q; want %q", obj, got, want) + } + res.Body.Close() + } + + // Test NotFound. + _, err := NewReader(ctx, bucket, "obj-not-exists") + if err != ErrObjectNotExist { + t.Errorf("Object should not exist, err found to be %v", err) + } + + name := objects[0] + + // Test StatObject. + o, err := StatObject(ctx, bucket, name) + if err != nil { + t.Error(err) + } + if got, want := o.Name, name; got != want { + t.Errorf("Name (%v) = %q; want %q", name, got, want) + } + if got, want := o.ContentType, defaultType; got != want { + t.Errorf("ContentType (%v) = %q; want %q", name, got, want) + } + + // Test object copy. + copy, err := CopyObject(ctx, bucket, name, bucket, copyObj, nil) + if err != nil { + t.Errorf("CopyObject failed with %v", err) + } + if copy.Name != copyObj { + t.Errorf("Copy object's name = %q; want %q", copy.Name, copyObj) + } + if copy.Bucket != bucket { + t.Errorf("Copy object's bucket = %q; want %q", copy.Bucket, bucket) + } + + // Test UpdateAttrs. 
+	updated, err := UpdateAttrs(ctx, bucket, name, ObjectAttrs{
+		ContentType: "text/html",
+		ACL:         []ACLRule{{Entity: "domain-google.com", Role: RoleReader}},
+	})
+	if err != nil {
+		t.Errorf("UpdateAttrs failed with %v", err)
+	}
+	if want := "text/html"; updated.ContentType != want {
+		t.Errorf("updated.ContentType == %q; want %q", updated.ContentType, want)
+	}
+
+	// Test checksums.
+	checksumCases := []struct {
+		name     string
+		contents [][]byte
+		size     int64
+		md5      string
+		crc32c   uint32
+	}{
+		{
+			name:     "checksum-object",
+			contents: [][]byte{[]byte("hello"), []byte("world")},
+			size:     10,
+			md5:      "fc5e038d38a57032085441e7fe7010b0",
+			crc32c:   1456190592,
+		},
+		{
+			name:     "zero-object",
+			contents: [][]byte{},
+			size:     0,
+			md5:      "d41d8cd98f00b204e9800998ecf8427e",
+			crc32c:   0,
+		},
+	}
+	for _, c := range checksumCases {
+		wc := NewWriter(ctx, bucket, c.name)
+		for _, data := range c.contents {
+			if _, err := wc.Write(data); err != nil {
+				t.Errorf("Write(%q) failed with %q", data, err)
+			}
+		}
+		if err = wc.Close(); err != nil {
+			t.Errorf("%q: close failed with %q", c.name, err)
+		}
+		obj := wc.Object()
+		if got, want := obj.Size, c.size; got != want {
+			t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want)
+		}
+		if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want {
+			t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want)
+		}
+		if got, want := obj.CRC32C, c.crc32c; got != want {
+			t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want)
+		}
+	}
+
+	// Test public ACL.
+	publicObj := objects[0]
+	if err = PutACLRule(ctx, bucket, publicObj, AllUsers, RoleReader); err != nil {
+		t.Errorf("PutACLRule failed with %v", err)
+	}
+	publicCtx := testutil.NoAuthContext()
+	rc, err := NewReader(publicCtx, bucket, publicObj)
+	if err != nil {
+		t.Error(err)
+	}
+	slurp, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Errorf("ReadAll failed with %v", err)
+	}
+	if string(slurp) != string(contents[publicObj]) {
+		t.Errorf("Public object's content is expected to be %s, found %s", contents[publicObj], slurp)
+	}
+	rc.Close()
+
+	// Test writer error handling.
+	wc := NewWriter(publicCtx, bucket, publicObj)
+	if _, err := wc.Write([]byte("hello")); err != nil {
+		t.Errorf("Write unexpectedly failed with %v", err)
+	}
+	if err = wc.Close(); err == nil {
+		t.Error("Close expected an error, found none")
+	}
+
+	// Delete the copied object with DeleteObject.
+	// The rest of the objects will be deleted during
+	// the initial cleanup. This test exists so we can still cover
+	// deletion even when there are no objects in the bucket to clean.
+ if err := DeleteObject(ctx, bucket, copyObj); err != nil { + t.Errorf("Deletion of %v failed with %v", copyObj, err) + } + _, err = StatObject(ctx, bucket, copyObj) + if err != ErrObjectNotExist { + t.Errorf("Copy is expected to be deleted, stat errored with %v", err) + } +} + +func TestACL(t *testing.T) { + ctx := testutil.Context(ScopeFullControl) + cleanup(t, "acl") + entity := ACLEntity("domain-google.com") + if err := PutDefaultACLRule(ctx, bucket, entity, RoleReader); err != nil { + t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) + } + for _, obj := range aclObjects { + t.Logf("Writing %v", obj) + wc := NewWriter(ctx, bucket, obj) + c := randomContents() + if _, err := wc.Write(c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + if err := wc.Close(); err != nil { + t.Errorf("Close for %v failed with %v", obj, err) + } + } + name := aclObjects[0] + acl, err := ACL(ctx, bucket, name) + if err != nil { + t.Errorf("Can't retrieve ACL of %v", name) + } + aclFound := false + for _, rule := range acl { + if rule.Entity == entity && rule.Role == RoleReader { + aclFound = true + } + } + if !aclFound { + t.Error("Expected to find an ACL rule for google.com domain users, but not found") + } + if err := DeleteACLRule(ctx, bucket, name, entity); err != nil { + t.Errorf("Can't delete the ACL rule for the entity: %v", entity) + } + + if err := PutBucketACLRule(ctx, bucket, "user-jbd@google.com", RoleReader); err != nil { + t.Errorf("Error while putting bucket ACL rule: %v", err) + } + bACL, err := BucketACL(ctx, bucket) + if err != nil { + t.Errorf("Error while getting the ACL of the bucket: %v", err) + } + bACLFound := false + for _, rule := range bACL { + if rule.Entity == "user-jbd@google.com" && rule.Role == RoleReader { + bACLFound = true + } + } + if !bACLFound { + t.Error("Expected to find an ACL rule for jbd@google.com user, but not found") + } + if err := DeleteBucketACLRule(ctx, bucket, "user-jbd@google.com"); err != nil { + t.Errorf("Error while deleting bucket ACL rule: %v", err) + } +} + +func cleanup(t *testing.T, prefix string) { + ctx := testutil.Context(ScopeFullControl) + var q *Query = &Query{ + Prefix: prefix, + } + for { + o, err := ListObjects(ctx, bucket, q) + if err != nil { + t.Fatalf("Cleanup List for bucket %v failed with error: %v", bucket, err) + } + for _, obj := range o.Results { + t.Logf("Cleanup deletion of %v", obj.Name) + if err = DeleteObject(ctx, bucket, obj.Name); err != nil { + t.Fatalf("Cleanup Delete for object %v failed with %v", obj.Name, err) + } + } + if o.Next == nil { + break + } + q = o.Next + } +} + +func randomContents() []byte { + h := md5.New() + io.WriteString(h, fmt.Sprintf("hello world%d", rand.Intn(100000))) + return h.Sum(nil) +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/storage.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/storage.go new file mode 100644 index 000000000..614daf3a8 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/storage.go @@ -0,0 +1,370 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package storage contains a Google Cloud Storage client. +// +// This package is experimental and may make backwards-incompatible changes. +package storage + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "google.golang.org/cloud/internal" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +// TODO(jbd): Add storage.buckets.list. +// TODO(jbd): Add storage.buckets.insert. +// TODO(jbd): Add storage.buckets.update. +// TODO(jbd): Add storage.buckets.delete. + +// TODO(jbd): Add storage.objects.watch. + +// BucketInfo returns the metadata for the specified bucket. +func BucketInfo(ctx context.Context, name string) (*Bucket, error) { + resp, err := rawService(ctx).Buckets.Get(name).Projection("full").Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp), nil +} + +// ListObjects lists objects from the bucket. You can specify a query +// to filter the results. If q is nil, no filtering is applied. +func ListObjects(ctx context.Context, bucket string, q *Query) (*Objects, error) { + c := rawService(ctx).Objects.List(bucket) + c.Projection("full") + if q != nil { + c.Delimiter(q.Delimiter) + c.Prefix(q.Prefix) + c.Versions(q.Versions) + c.PageToken(q.Cursor) + if q.MaxResults > 0 { + c.MaxResults(int64(q.MaxResults)) + } + } + resp, err := c.Do() + if err != nil { + return nil, err + } + objects := &Objects{ + Results: make([]*Object, len(resp.Items)), + Prefixes: make([]string, len(resp.Prefixes)), + } + for i, item := range resp.Items { + objects.Results[i] = newObject(item) + } + for i, prefix := range resp.Prefixes { + objects.Prefixes[i] = prefix + } + if resp.NextPageToken != "" { + next := Query{} + if q != nil { + // keep the other filtering + // criteria if there is a query + next = *q + } + next.Cursor = resp.NextPageToken + objects.Next = &next + } + return objects, nil +} + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. 
+	GoogleAccessID string
+
+	// PrivateKey is the Google service account private key. It is obtainable
+	// from the Google Developers Console.
+	// At https://console.developers.google.com/project//apiui/credential,
+	// create a service account client ID or reuse one of your existing service account
+	// credentials. Click "Generate new P12 key" to generate and download
+	// a new private key. Once you download the P12 file, use the following command
+	// to convert it into a PEM file.
+	//
+	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+	//
+	// Provide the contents of the PEM file as a byte slice.
+	// Required.
+	PrivateKey []byte
+
+	// Method is the HTTP method to be used with the signed URL.
+	// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
+	// Required.
+	Method string
+
+	// Expires is the expiration time on the signed URL. It must be
+	// a datetime in the future.
+	// Required.
+	Expires time.Time
+
+	// ContentType is the content type header the client must provide
+	// to use the generated signed URL.
+	// Optional.
+	ContentType string
+
+	// Headers is a list of extension headers the client must provide
+	// in order to use the generated signed URL.
+	// Optional.
+	Headers []string
+
+	// MD5 is the base64 encoded MD5 checksum of the file.
+	// If provided, the client should provide the exact value on the request
+	// header in order to use the signed URL.
+	// Optional.
+	MD5 []byte
+}
+
+// SignedURL returns a URL for the specified object. Signed URLs allow
+// users access to a restricted resource for a limited time without having a
+// Google account or signing in. For more information about signed
+// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
+	if opts == nil {
+		return "", errors.New("storage: missing required SignedURLOptions")
+	}
+	if opts.GoogleAccessID == "" || opts.PrivateKey == nil {
+		return "", errors.New("storage: missing required credentials to generate a signed URL")
+	}
+	if opts.Method == "" {
+		return "", errors.New("storage: missing required method option")
+	}
+	if opts.Expires.IsZero() {
+		return "", errors.New("storage: missing required expires option")
+	}
+	key, err := parseKey(opts.PrivateKey)
+	if err != nil {
+		return "", err
+	}
+	h := sha256.New()
+	fmt.Fprintf(h, "%s\n", opts.Method)
+	fmt.Fprintf(h, "%s\n", opts.MD5)
+	fmt.Fprintf(h, "%s\n", opts.ContentType)
+	fmt.Fprintf(h, "%d\n", opts.Expires.Unix())
+	fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n"))
+	fmt.Fprintf(h, "/%s/%s", bucket, name)
+	b, err := rsa.SignPKCS1v15(
+		rand.Reader,
+		key,
+		crypto.SHA256,
+		h.Sum(nil),
+	)
+	if err != nil {
+		return "", err
+	}
+	encoded := base64.StdEncoding.EncodeToString(b)
+	u := &url.URL{
+		Scheme: "https",
+		Host:   "storage.googleapis.com",
+		Path:   fmt.Sprintf("/%s/%s", bucket, name),
+	}
+	q := u.Query()
+	q.Set("GoogleAccessId", opts.GoogleAccessID)
+	q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
+	q.Set("Signature", string(encoded))
+	u.RawQuery = q.Encode()
+	return u.String(), nil
+}
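+
+// An added caller-side sketch of SignedURL (the key path, bucket, object,
+// and account email are placeholders; the PEM bytes come from the P12
+// conversion described on SignedURLOptions.PrivateKey):
+//
+//	pemKey, err := ioutil.ReadFile("/path/to/key.pem")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	url, err := storage.SignedURL("bucketname", "file1", &storage.SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     pemKey,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(48 * time.Hour),
+//	})
+
+// StatObject returns meta information about the specified object.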
+func StatObject(ctx context.Context, bucket, name string) (*Object, error) { + o, err := rawService(ctx).Objects.Get(bucket, name).Projection("full").Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(o), nil +} + +// UpdateAttrs updates an object with the provided attributes. +// All zero-value attributes are ignored. +func UpdateAttrs(ctx context.Context, bucket, name string, attrs ObjectAttrs) (*Object, error) { + o, err := rawService(ctx).Objects.Patch(bucket, name, attrs.toRawObject(bucket)).Projection("full").Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(o), nil +} + +// DeleteObject deletes the single specified object. +func DeleteObject(ctx context.Context, bucket, name string) error { + return rawService(ctx).Objects.Delete(bucket, name).Do() +} + +// CopyObject copies the source object to the destination. +// The copied object's attributes are overwritten by attrs if non-nil. +func CopyObject(ctx context.Context, srcBucket, srcName string, destBucket, destName string, attrs *ObjectAttrs) (*Object, error) { + if srcBucket == "" || destBucket == "" { + return nil, errors.New("storage: srcBucket and destBucket must both be non-empty") + } + if srcName == "" || destName == "" { + return nil, errors.New("storage: srcName and destName must be non-empty") + } + var rawObject *raw.Object + if attrs != nil { + attrs.Name = destName + if attrs.ContentType == "" { + return nil, errors.New("storage: attrs.ContentType must be non-empty") + } + rawObject = attrs.toRawObject(destBucket) + } + o, err := rawService(ctx).Objects.Copy( + srcBucket, srcName, destBucket, destName, rawObject).Projection("full").Do() + if err != nil { + return nil, err + } + return newObject(o), nil +} + +// NewReader creates a new io.ReadCloser to read the contents +// of the object. +func NewReader(ctx context.Context, bucket, name string) (io.ReadCloser, error) { + return NewLimitedReader(ctx, bucket, name, 0, -1) +} + +// NewLimitedReader creates a new io.ReadCloser to read only the contents of +// the object beginning at offset. If length is -1, it reads to the end of the +// object. +func NewLimitedReader(ctx context.Context, bucket, name string, offset, length int64) (io.ReadCloser, error) { + hc := internal.HTTPClient(ctx) + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + if length < 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } + res, err := hc.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + res.Body.Close() + return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", bucket, name, res.Status) + } + if offset > 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return nil, errors.New("storage: partial request not satisfied") + } + return res.Body, nil +} + +// NewWriter returns a storage Writer that writes to the GCS object +// identified by the specified name. 
+// If such an object doesn't exist, it creates one.
+// Attributes can be set on the object by modifying the returned Writer's
+// ObjectAttrs field before the first call to Write. The name parameter to this
+// function is ignored if the Name field of the ObjectAttrs field is set to a
+// non-empty string.
+//
+// It is the caller's responsibility to call Close when writing is done.
+//
+// The object is not available and any previous object with the same
+// name is not replaced on Cloud Storage until Close is called.
+func NewWriter(ctx context.Context, bucket, name string) *Writer {
+	return &Writer{
+		ctx:    ctx,
+		bucket: bucket,
+		name:   name,
+		donec:  make(chan struct{}),
+	}
+}
+
+func rawService(ctx context.Context) *raw.Service {
+	return internal.Service(ctx, "storage", func(hc *http.Client) interface{} {
+		svc, _ := raw.New(hc)
+		return svc
+	}).(*raw.Service)
+}
+
+// parseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func parseKey(key []byte) (*rsa.PrivateKey, error) {
+	if block, _ := pem.Decode(key); block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("storage: private key is invalid")
+	}
+	return parsed, nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/storage_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/storage_test.go
new file mode 100644
index 000000000..4d239e5c7
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/storage_test.go
@@ -0,0 +1,238 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+func TestSignedURL(t *testing.T) {
+	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
+	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
+		GoogleAccessID: "xxx@clientid",
+		PrivateKey:     dummyKey("rsa"),
+		Method:         "GET",
+		MD5:            []byte("202cb962ac59075b964b07152d234b70"),
+		Expires:        expires,
+		ContentType:    "application/json",
+		Headers:        []string{"x-header1", "x-header2"},
+	})
+	if err != nil {
+		t.Error(err)
+	}
+	want := "https://storage.googleapis.com/bucket-name/object-name?"
+ + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "ITqNWQHr7ayIj%2B0Ds5%2FzUT2cWMQQouuFmu6L11Zd3kfNKvm3sjyGIzO" + + "gZsSUoter1SxP7BcrCzgqIZ9fQmgQnuIpqqLL4kcGmTbKsQS6hTknpJM%2F" + + "2lS4NY6UH1VXBgm2Tce28kz8rnmqG6svcGvtWuOgJsETeSIl1R9nAEIDCEq" + + "ZJzoOiru%2BODkHHkpoFjHWAwHugFHX%2B9EX4SxaytiN3oEy48HpYGWV0I" + + "h8NvU1hmeWzcLr41GnTADeCn7Eg%2Fb5H2GCNO70Cz%2Bw2fn%2BofLCUeR" + + "YQd%2FhES8oocv5kpHZkstc8s8uz3aKMsMauzZ9MOmGy%2F6VULBgIVvi6a" + + "AwEBIYOw%3D%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_PEMPrivateKey(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "B7XkS4dfmVDoe%2FoDeXZkWlYmg8u2kI0SizTrzL5%2B9RmKnb5j7Kf34DZ" + + "JL8Hcjr1MdPFLNg2QV4lEH86Gqgqt%2Fv3jFOTRl4wlzcRU%2FvV5c5HU8M" + + "qW0FZ0IDbqod2RdsMONLEO6yQWV2HWFrMLKl2yMFlWCJ47et%2BFaHe6v4Z" + + "EBc0%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_MissingOptions(t *testing.T) { + pk := dummyKey("rsa") + var tests = []struct { + opts *SignedURLOptions + errMsg string + }{ + { + &SignedURLOptions{}, + "missing required credentials", + }, + { + &SignedURLOptions{GoogleAccessID: "access_id"}, + "missing required credentials", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + }, + "missing required method", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + Method: "PUT", + }, + "missing required expires", + }, + } + for _, test := range tests { + _, err := SignedURL("bucket", "name", test.opts) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected err: %v, found: %v", test.errMsg, err) + } + } +} + +func dummyKey(kind string) []byte { + slurp, err := ioutil.ReadFile(fmt.Sprintf("./testdata/dummy_%s", kind)) + if err != nil { + log.Fatal(err) + } + return slurp +} + +func TestCopyObjectMissingFields(t *testing.T) { + var tests = []struct { + srcBucket, srcName, destBucket, destName string + errMsg string + }{ + { + "mybucket", "", "mybucket", "destname", + "srcName and destName must be non-empty", + }, + { + "mybucket", "srcname", "mybucket", "", + "srcName and destName must be non-empty", + }, + { + "", "srcfile", "mybucket", "destname", + "srcBucket and destBucket must both be non-empty", + }, + { + "mybucket", "srcfile", "", "destname", + "srcBucket and destBucket must both be non-empty", + }, + } + for i, test := range tests { + _, err := CopyObject(context.TODO(), test.srcBucket, test.srcName, test.destBucket, test.destName, nil) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("CopyObject test #%v: err = %v, want %v", i, err, test.errMsg) + } + } +} + +func TestObjectNames(t *testing.T) { + // Naming requirements: https://cloud.google.com/storage/docs/bucket-naming + const maxLegalLength = 1024 + + type testT struct { + name, want string + } + tests := []testT{ + // Embedded characters important in URLs. + {"foo % bar", "foo%20%25%20bar"}, + {"foo ? 
bar", "foo%20%3F%20bar"}, + {"foo / bar", "foo%20/%20bar"}, + {"foo %?/ bar", "foo%20%25%3F/%20bar"}, + + // Non-Roman scripts + {"타코", "%ED%83%80%EC%BD%94"}, + {"世界", "%E4%B8%96%E7%95%8C"}, + + // Longest legal name + {strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)}, + + // Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode + {"foo \u000b bar", "foo%20%0B%20bar"}, + {"foo \u000c bar", "foo%20%0C%20bar"}, + {"foo \u0085 bar", "foo%20%C2%85%20bar"}, + {"foo \u2028 bar", "foo%20%E2%80%A8%20bar"}, + {"foo \u2029 bar", "foo%20%E2%80%A9%20bar"}, + + // Null byte. + {"foo \u0000 bar", "foo%20%00%20bar"}, + + // Non-control characters that are discouraged, but not forbidden, according to the documentation. + {"foo # bar", "foo%20%23%20bar"}, + {"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"}, + + // Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/ + {"foo \u212b bar", "foo%20%E2%84%AB%20bar"}, + {"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"}, + {"foo \u00c5 bar", "foo%20%C3%85%20bar"}, + + // Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10) + {"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"}, + {"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"}, + {"foo \uac00 bar", "foo%20%EA%B0%80%20bar"}, + } + + // C0 control characters not forbidden by the docs. + var runes []rune + for r := rune(0x01); r <= rune(0x1f); r++ { + if r != '\u000a' && r != '\u000d' { + runes = append(runes, r) + } + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"}) + + // C1 control characters, plus DEL. + runes = nil + for r := rune(0x7f); r <= rune(0x9f); r++ { + runes = append(runes, r) + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"}) + + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + } + + for _, test := range tests { + g, err := SignedURL("bucket-name", test.name, opts) + if err != nil { + t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err) + } + if w := "/bucket-name/" + test.want; !strings.Contains(g, w) { + t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_pem b/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_pem new file mode 100644 index 000000000..3428d4497 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_pem @@ -0,0 +1,39 @@ +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +Key Attributes: +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx +64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY +kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB +AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs 
+KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ +7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i +Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+ +5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G +ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ +EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS +NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI +/zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825 +TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C +-----END RSA PRIVATE KEY----- +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +-----BEGIN CERTIFICATE----- +MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE +AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex +MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y +NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45 +czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU +ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp +gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx +6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/ +BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB +ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T +lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv +qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2 +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_rsa b/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_rsa new file mode 100644 index 000000000..4ce6678db --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg 
+KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
+OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/types.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/types.go
new file mode 100644
index 000000000..e6548434e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/types.go
@@ -0,0 +1,408 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"encoding/base64"
+	"io"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// Bucket represents a Google Cloud Storage bucket.
+type Bucket struct {
+	// Name is the name of the bucket.
+	Name string
+
+	// ACL is the list of access control rules on the bucket.
+	ACL []ACLRule
+
+	// DefaultObjectACL is the list of access controls to
+	// apply to new objects when no object ACL is provided.
+	DefaultObjectACL []ACLRule
+
+	// Location is the location of the bucket. It defaults to "US".
+	Location string
+
+	// Metageneration is the metadata generation of the bucket.
+	// Read-only.
+	Metageneration int64
+
+	// StorageClass is the storage class of the bucket. This defines
+	// how objects in the bucket are stored and determines the SLA
+	// and the cost of storage. Typical values are "STANDARD" and
+	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD".
+	StorageClass string
+
+	// Created is the creation time of the bucket.
+	// Read-only.
+	Created time.Time
+}
+
+func newBucket(b *raw.Bucket) *Bucket {
+	if b == nil {
+		return nil
+	}
+	bucket := &Bucket{
+		Name:           b.Name,
+		Location:       b.Location,
+		Metageneration: b.Metageneration,
+		StorageClass:   b.StorageClass,
+		Created:        convertTime(b.TimeCreated),
+	}
+	acl := make([]ACLRule, len(b.Acl))
+	for i, rule := range b.Acl {
+		acl[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	bucket.ACL = acl
+	objACL := make([]ACLRule, len(b.DefaultObjectAcl))
+	for i, rule := range b.DefaultObjectAcl {
+		objACL[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	bucket.DefaultObjectACL = objACL
+	return bucket
+}
+
+// ObjectAttrs is the user-editable object attributes.
+type ObjectAttrs struct {
+	// Name is the name of the object.
+	Name string
+
+	// ContentType is the MIME type of the object's content.
+	// Optional.
+	ContentType string
+
+	// ContentLanguage is the optional RFC 1766 Content-Language of
+	// the object's content sent in response headers.
+	ContentLanguage string
+
+	// ContentEncoding is the optional Content-Encoding of the object
+	// sent in the response headers.
+	ContentEncoding string
+
+	// CacheControl is the optional Cache-Control header of the object
+	// sent in the response headers.
+	CacheControl string
+
+	// ContentDisposition is the optional Content-Disposition header of the object
+	// sent in the response headers.
+	ContentDisposition string
+
+	// ACL is the list of access control rules for the object.
+	// Optional. If nil or empty, existing ACL rules are preserved.
+	ACL []ACLRule
+
+	// Metadata represents user-provided metadata, in key/value pairs.
+	// It can be nil if the current metadata values need to be preserved.
+	Metadata map[string]string
+}
+
+func (o ObjectAttrs) toRawObject(bucket string) *raw.Object {
+	var acl []*raw.ObjectAccessControl
+	if len(o.ACL) > 0 {
+		acl = make([]*raw.ObjectAccessControl, len(o.ACL))
+		for i, rule := range o.ACL {
+			acl[i] = &raw.ObjectAccessControl{
+				Entity: string(rule.Entity),
+				Role:   string(rule.Role),
+			}
+		}
+	}
+	return &raw.Object{
+		Bucket:             bucket,
+		Name:               o.Name,
+		ContentType:        o.ContentType,
+		ContentEncoding:    o.ContentEncoding,
+		ContentLanguage:    o.ContentLanguage,
+		CacheControl:       o.CacheControl,
+		ContentDisposition: o.ContentDisposition,
+		Acl:                acl,
+		Metadata:           o.Metadata,
+	}
+}
+
+// Object represents a Google Cloud Storage (GCS) object.
+type Object struct {
+	// Bucket is the name of the bucket containing this GCS object.
+	Bucket string
+
+	// Name is the name of the object within the bucket.
+	Name string
+
+	// ContentType is the MIME type of the object's content.
+	ContentType string
+
+	// ContentLanguage is the content language of the object's content.
+	ContentLanguage string
+
+	// CacheControl is the Cache-Control header to be sent in the response
+	// headers when serving the object data.
+	CacheControl string
+
+	// ACL is the list of access control rules for the object.
+	ACL []ACLRule
+
+	// Owner is the owner of the object.
+	//
+	// If non-zero, it is in the form of "user-".
+	Owner string
+
+	// Size is the length of the object's content.
+	Size int64
+
+	// ContentEncoding is the encoding of the object's content.
+	ContentEncoding string
+
+	// MD5 is the MD5 hash of the object's content.
+	MD5 []byte
+
+	// CRC32C is the CRC32 checksum of the object's content using
+	// the Castagnoli93 polynomial.
+	CRC32C uint32
+
+	// MediaLink is a URL to the object's content.
+	MediaLink string
+
+	// Metadata represents user-provided metadata, in key/value pairs.
+	// It can be nil if no metadata is provided.
+	Metadata map[string]string
+
+	// Generation is the generation number of the object's content.
+	Generation int64
+
+	// MetaGeneration is the version of the metadata for this
+	// object at this generation. This field is used for preconditions
+	// and for detecting changes in metadata. A metageneration number
+	// is only meaningful in the context of a particular generation
+	// of a particular object.
+	MetaGeneration int64
+
+	// StorageClass is the storage class of the bucket.
+	// This value defines how objects in the bucket are stored and
+	// determines the SLA and the cost of storage. Typical values are
+	// "STANDARD" and "DURABLE_REDUCED_AVAILABILITY".
+	// It defaults to "STANDARD".
+	StorageClass string
+
+	// Deleted is the time the object was deleted.
+	// If not deleted, it is the zero value.
+	Deleted time.Time
+
+	// Updated is the creation or modification time of the object.
+	// For buckets with versioning enabled, changing an object's
+	// metadata does not change this property.
+	Updated time.Time
+}
+
+// convertTime converts a time in RFC3339 format to time.Time.
+// If any error occurs in parsing, the zero-value time.Time is silently returned.
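+// For example, convertTime("2014-11-24T18:00:04Z") yields the corresponding
+// time.Time, while a malformed value such as "not-a-time" yields time.Time{}.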
+func convertTime(t string) time.Time {
+	var r time.Time
+	if t != "" {
+		r, _ = time.Parse(time.RFC3339, t)
+	}
+	return r
+}
+
+func newObject(o *raw.Object) *Object {
+	if o == nil {
+		return nil
+	}
+	acl := make([]ACLRule, len(o.Acl))
+	for i, rule := range o.Acl {
+		acl[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	owner := ""
+	if o.Owner != nil {
+		owner = o.Owner.Entity
+	}
+	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
+	var crc32c uint32
+	d, err := base64.StdEncoding.DecodeString(o.Crc32c)
+	if err == nil && len(d) == 4 {
+		crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
+	}
+	return &Object{
+		Bucket:          o.Bucket,
+		Name:            o.Name,
+		ContentType:     o.ContentType,
+		ContentLanguage: o.ContentLanguage,
+		CacheControl:    o.CacheControl,
+		ACL:             acl,
+		Owner:           owner,
+		ContentEncoding: o.ContentEncoding,
+		Size:            int64(o.Size),
+		MD5:             md5,
+		CRC32C:          crc32c,
+		MediaLink:       o.MediaLink,
+		Metadata:        o.Metadata,
+		Generation:      o.Generation,
+		MetaGeneration:  o.Metageneration,
+		StorageClass:    o.StorageClass,
+		Deleted:         convertTime(o.TimeDeleted),
+		Updated:         convertTime(o.Updated),
+	}
+}
+
+// Query represents a query to filter objects from a bucket.
+type Query struct {
+	// Delimiter returns results in a directory-like fashion.
+	// Results will contain only objects whose names, aside from the
+	// prefix, do not contain delimiter. Objects whose names,
+	// aside from the prefix, contain delimiter will have their name,
+	// truncated after the delimiter, returned in prefixes.
+	// Duplicate prefixes are omitted.
+	// Optional.
+	Delimiter string
+
+	// Prefix is the prefix filter to query objects
+	// whose names begin with this prefix.
+	// Optional.
+	Prefix string
+
+	// Versions indicates whether multiple versions of the same
+	// object will be included in the results.
+	Versions bool
+
+	// Cursor is a previously-returned page token
+	// representing part of the larger set of results to view.
+	// Optional.
+	Cursor string
+
+	// MaxResults is the maximum number of items plus prefixes
+	// to return. As duplicate prefixes are omitted,
+	// fewer total results may be returned than requested.
+	// The default page limit is used if it is negative or zero.
+	MaxResults int
+}
+
+// Objects represents a list of objects returned from
+// a bucket lookup request and a query to retrieve more
+// objects from the next pages.
+type Objects struct {
+	// Results represent a list of object results.
+	Results []*Object
+
+	// Next is the continuation query to retrieve more
+	// results with the same filtering criteria. If there
+	// are no more results to retrieve, it is nil.
+	Next *Query
+
+	// Prefixes represents prefixes of objects
+	// matching-but-not-listed up to and including
+	// the requested delimiter.
+	Prefixes []string
+}
+
+// contentTyper implements ContentTyper to enable an
+// io.ReadCloser to specify its MIME type.
+type contentTyper struct {
+	io.Reader
+	t string
+}
+
+func (c *contentTyper) ContentType() string {
+	return c.t
+}
+
+// A Writer writes a Cloud Storage object.
+type Writer struct {
+	// ObjectAttrs are optional attributes to set on the object. Any attributes
+	// must be initialized before the first Write call. Nil or zero-valued
+	// attributes are ignored.
+	ObjectAttrs
+
+	ctx    context.Context
+	bucket string
+	name   string
+
+	once sync.Once
+
+	opened bool
+	r      io.Reader
+	pw     *io.PipeWriter
+
+	donec chan struct{} // closed after err and obj are set.
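+
+	// err and obj are valid to read only after donec has been closed.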
+ err error + obj *Object +} + +func (w *Writer) open() { + attrs := w.ObjectAttrs + // Always set the name, otherwise the backend + // rejects the request and responds with an HTTP 400. + if attrs.Name == "" { + attrs.Name = w.name + } + pr, pw := io.Pipe() + w.r = &contentTyper{pr, attrs.ContentType} + w.pw = pw + w.opened = true + + go func() { + resp, err := rawService(w.ctx).Objects.Insert( + w.bucket, attrs.toRawObject(w.bucket)).Media(w.r).Projection("full").Do() + w.err = err + if err == nil { + w.obj = newObject(resp) + } else { + pr.CloseWithError(w.err) + } + close(w.donec) + }() +} + +// Write appends to w. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if !w.opened { + w.open() + } + return w.pw.Write(p) +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Object. +func (w *Writer) Close() error { + if !w.opened { + w.open() + } + if err := w.pw.Close(); err != nil { + return err + } + <-w.donec + return w.err +} + +// Object returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. +func (w *Writer) Object() *Object { + return w.obj +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/types_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/types_test.go new file mode 100644 index 000000000..02b71314c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/storage/types_test.go @@ -0,0 +1,42 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
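+
+// The test below drives the Writer defined in types.go. A typical (sketched)
+// use of that API, mirroring what the test does, is:
+//
+//	wc := NewWriter(ctx, "bucket", "object")
+//	wc.ContentType = "text/plain"
+//	if _, err := wc.Write(data); err != nil {
+//		// handle error
+//	}
+//	if err := wc.Close(); err != nil {
+//		// handle error
+//	}
+//	obj := wc.Object() // valid only after Close returns nil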
+
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"google.golang.org/cloud"
+)
+
+type fakeTransport struct{}
+
+func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return nil, fmt.Errorf("error handling request")
+}
+
+func TestErrorOnObjectsInsertCall(t *testing.T) {
+	ctx := cloud.NewContext("project-id", &http.Client{
+		Transport: &fakeTransport{}})
+	wc := NewWriter(ctx, "bucketname", "filename1")
+	wc.ContentType = "text/plain"
+	if _, err := wc.Write([]byte("hello world")); err == nil {
+		t.Errorf("expected error on write, got nil")
+	}
+	if err := wc.Close(); err == nil {
+		t.Errorf("expected error on close, got nil")
+	}
+} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml new file mode 100644 index 000000000..250356014 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml @@ -0,0 +1,4 @@ +language: go
+
+script:
+  - make test testrace diff --git a/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000..407d384a7 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# How to contribute
+
+We definitely welcome patches and contributions to grpc! Here are some
+guidelines and information about how to do so.
+
+## Getting started
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+### Filing Issues
+When filing an issue, make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+### Contributing code
+Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/LICENSE b/Godeps/_workspace/src/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..f4988b450 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/LICENSE @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/Makefile b/Godeps/_workspace/src/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..0dc225ff4 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/Makefile @@ -0,0 +1,47 @@ +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + +all: test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go get -v github.com/golang/protobuf/protoc-gen-go + for file in $$(git ls-files '*.proto'); do \ + protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ + done + +test: testdeps + go test -v -cpu 1,4 google.golang.org/grpc/... + +testrace: testdeps + go test -v -race -cpu 1,4 google.golang.org/grpc/... + +clean: + go clean google.golang.org/grpc/... diff --git a/Godeps/_workspace/src/google.golang.org/grpc/PATENTS b/Godeps/_workspace/src/google.golang.org/grpc/PATENTS new file mode 100644 index 000000000..619f9dbfe --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the GRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of GRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of GRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of GRPC or any code incorporated within this +implementation of GRPC constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of GRPC +shall terminate as of the date such litigation is filed. 
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/README.md b/Godeps/_workspace/src/google.golang.org/grpc/README.md new file mode 100644 index 000000000..57a88341c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/README.md @@ -0,0 +1,28 @@ +# gRPC-Go
+
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+
+The Go implementation of [gRPC](https://github.com/grpc/grpc)
+
+Installation
+------------
+
+To install this package, you need to install Go 1.4 or above and set up your Go workspace on your computer. The simplest way to install the library is to run:
+
+```
+$ go get google.golang.org/grpc
+```
+
+Prerequisites
+-------------
+
+This requires Go 1.4 or above.
+
+Documentation
+-------------
+You can find more detailed documentation and examples in the [examples directory](examples/).
+
+Status
+------
+Beta release
+ diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go new file mode 100644 index 000000000..7215d35a5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go @@ -0,0 +1,147 @@ +/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package benchmark implements the building blocks to set up end-to-end gRPC benchmarks.
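+
+A typical flow, sketched from the helpers below: StartServer brings up a
+TestService backend, NewClientConn dials it, and DoUnaryCall or
+DoStreamingRoundTrip issue the timed requests.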
+*/
+package benchmark
+
+import (
+	"io"
+	"math"
+	"net"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	testpb "google.golang.org/grpc/benchmark/grpc_testing"
+	"google.golang.org/grpc/grpclog"
+)
+
+func newPayload(t testpb.PayloadType, size int) *testpb.Payload {
+	if size < 0 {
+		grpclog.Fatalf("Requested a response with invalid length %d", size)
+	}
+	body := make([]byte, size)
+	switch t {
+	case testpb.PayloadType_COMPRESSABLE:
+	case testpb.PayloadType_UNCOMPRESSABLE:
+		grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
+	default:
+		grpclog.Fatalf("Unsupported payload type: %d", t)
+	}
+	return &testpb.Payload{
+		Type: t,
+		Body: body,
+	}
+}
+
+type testServer struct {
+}
+
+func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+	return &testpb.SimpleResponse{
+		Payload: newPayload(in.ResponseType, int(in.ResponseSize)),
+	}, nil
+}
+
+func (s *testServer) StreamingCall(stream testpb.TestService_StreamingCallServer) error {
+	for {
+		in, err := stream.Recv()
+		if err == io.EOF {
+			// read done.
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		if err := stream.Send(&testpb.SimpleResponse{
+			Payload: newPayload(in.ResponseType, int(in.ResponseSize)),
+		}); err != nil {
+			return err
+		}
+	}
+}
+
+// StartServer starts a gRPC server serving a benchmark service on the given
+// address, which may be something like "localhost:0". It returns its listen
+// address and a function to stop the server.
+func StartServer(addr string) (string, func()) {
+	lis, err := net.Listen("tcp", addr)
+	if err != nil {
+		grpclog.Fatalf("Failed to listen: %v", err)
+	}
+	s := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32))
+	testpb.RegisterTestServiceServer(s, &testServer{})
+	go s.Serve(lis)
+	return lis.Addr().String(), func() {
+		s.Stop()
+	}
+}
+
+// DoUnaryCall performs a unary RPC with the given stub and request and response sizes.
+func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) {
+	pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
+	req := &testpb.SimpleRequest{
+		ResponseType: pl.Type,
+		ResponseSize: int32(respSize),
+		Payload:      pl,
+	}
+	if _, err := tc.UnaryCall(context.Background(), req); err != nil {
+		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
+	}
+}
+
+// DoStreamingRoundTrip performs a round trip for a single streaming rpc.
+func DoStreamingRoundTrip(tc testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient, reqSize, respSize int) {
+	pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
+	req := &testpb.SimpleRequest{
+		ResponseType: pl.Type,
+		ResponseSize: int32(respSize),
+		Payload:      pl,
+	}
+	if err := stream.Send(req); err != nil {
+		grpclog.Fatalf("StreamingCall(_).Send: %v", err)
+	}
+	if _, err := stream.Recv(); err != nil {
+		grpclog.Fatalf("StreamingCall(_).Recv: %v", err)
+	}
+}
+
+// NewClientConn creates a gRPC client connection to addr.
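+// A minimal end-to-end harness built from this package (a sketch; see also
+// benchmark_test.go) might be:
+//
+//	addr, stop := StartServer("localhost:0")
+//	defer stop()
+//	conn := NewClientConn(addr)
+//	defer conn.Close()
+//	DoUnaryCall(testpb.NewTestServiceClient(conn), 1, 1)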
+func NewClientConn(addr string) *grpc.ClientConn { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) + } + return conn +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark_test.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark_test.go new file mode 100644 index 000000000..97779e22b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark_test.go @@ -0,0 +1,197 @@ +package benchmark + +import ( + "os" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" +) + +func runUnary(b *testing.B, maxConcurrentCalls int) { + s := stats.AddStats(b, 38) + b.StopTimer() + target, stopper := StartServer("localhost:0") + defer stopper() + conn := NewClientConn(target) + tc := testpb.NewTestServiceClient(conn) + + // Warm up connection. + for i := 0; i < 10; i++ { + unaryCaller(tc) + } + ch := make(chan int, maxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(maxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. + for i := 0; i < maxConcurrentCalls; i++ { + go func() { + for range ch { + start := time.Now() + unaryCaller(tc) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.StartTimer() + for i := 0; i < b.N; i++ { + ch <- i + } + b.StopTimer() + close(ch) + wg.Wait() + conn.Close() +} + +func runStream(b *testing.B, maxConcurrentCalls int) { + s := stats.AddStats(b, 38) + b.StopTimer() + target, stopper := StartServer("localhost:0") + defer stopper() + conn := NewClientConn(target) + tc := testpb.NewTestServiceClient(conn) + + // Warm up connection. + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for i := 0; i < 10; i++ { + streamCaller(tc, stream) + } + + ch := make(chan int, maxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(maxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. 
+ for i := 0; i < maxConcurrentCalls; i++ { + go func() { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for range ch { + start := time.Now() + streamCaller(tc, stream) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.StartTimer() + for i := 0; i < b.N; i++ { + ch <- i + } + b.StopTimer() + close(ch) + wg.Wait() + conn.Close() +} +func unaryCaller(client testpb.TestServiceClient) { + DoUnaryCall(client, 1, 1) +} + +func streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) { + DoStreamingRoundTrip(client, stream, 1, 1) +} + +func BenchmarkClientStreamc1(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 1) +} + +func BenchmarkClientStreamc8(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 8) +} + +func BenchmarkClientStreamc64(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 64) +} + +func BenchmarkClientStreamc512(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 512) +} +func BenchmarkClientUnaryc1(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 1) +} + +func BenchmarkClientUnaryc8(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 8) +} + +func BenchmarkClientUnaryc64(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 64) +} + +func BenchmarkClientUnaryc512(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 512) +} + +func BenchmarkClientStreamNoTracec1(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 1) +} + +func BenchmarkClientStreamNoTracec8(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 8) +} + +func BenchmarkClientStreamNoTracec64(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 64) +} + +func BenchmarkClientStreamNoTracec512(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 512) +} +func BenchmarkClientUnaryNoTracec1(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 1) +} + +func BenchmarkClientUnaryNoTracec8(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 8) +} + +func BenchmarkClientUnaryNoTracec64(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 64) +} + +func BenchmarkClientUnaryNoTracec512(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 512) +} + +func TestMain(m *testing.M) { + os.Exit(stats.RunTestMain(m)) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go new file mode 100644 index 000000000..f9e2a83db --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go @@ -0,0 +1,161 @@ +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +var ( + server = flag.String("server", "", "The server address") + maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs") + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client") + trace = flag.Bool("trace", true, "Whether tracing is on") + rpcType = flag.Int("rpc_type", 0, + `Configure different client rpc type. 
Valid options are: + 0 : unary call; + 1 : streaming call.`) +) + +func unaryCaller(client testpb.TestServiceClient) { + benchmark.DoUnaryCall(client, 1, 1) +} + +func streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) { + benchmark.DoStreamingRoundTrip(client, stream, 1, 1) +} + +func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.TestServiceClient) { + s = stats.NewStats(256) + conn = benchmark.NewClientConn(*server) + tc = testpb.NewTestServiceClient(conn) + return s, conn, tc +} + +func closeLoopUnary() { + s, conn, tc := buildConnection() + + for i := 0; i < 100; i++ { + unaryCaller(tc) + } + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + for _ = range ch { + start := time.Now() + unaryCaller(tc) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) + +} + +func closeLoopStream() { + s, conn, tc := buildConnection() + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for i := 0; i < 100; i++ { + streamCaller(tc, stream) + } + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + // Distribute RPCs over maxConcurrentCalls workers. + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + for _ = range ch { + start := time.Now() + streamCaller(tc, stream) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) +} + +func main() { + flag.Parse() + grpc.EnableTracing = *trace + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Client profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + switch *rpcType { + case 0: + closeLoopUnary() + case 1: + closeLoopStream() + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go new file mode 100644 index 000000000..619c450ca --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go @@ -0,0 +1,641 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. 
+ +It is generated from these files: + test.proto + +It has these top-level messages: + StatsRequest + ServerStats + Payload + HistogramData + ClientConfig + Mark + ClientArgs + ClientStats + ClientStatus + ServerConfig + ServerArgs + ServerStatus + SimpleRequest + SimpleResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. + PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} + +type ClientType int32 + +const ( + ClientType_SYNCHRONOUS_CLIENT ClientType = 0 + ClientType_ASYNC_CLIENT ClientType = 1 +) + +var ClientType_name = map[int32]string{ + 0: "SYNCHRONOUS_CLIENT", + 1: "ASYNC_CLIENT", +} +var ClientType_value = map[string]int32{ + "SYNCHRONOUS_CLIENT": 0, + "ASYNC_CLIENT": 1, +} + +func (x ClientType) String() string { + return proto.EnumName(ClientType_name, int32(x)) +} + +type ServerType int32 + +const ( + ServerType_SYNCHRONOUS_SERVER ServerType = 0 + ServerType_ASYNC_SERVER ServerType = 1 +) + +var ServerType_name = map[int32]string{ + 0: "SYNCHRONOUS_SERVER", + 1: "ASYNC_SERVER", +} +var ServerType_value = map[string]int32{ + "SYNCHRONOUS_SERVER": 0, + "ASYNC_SERVER": 1, +} + +func (x ServerType) String() string { + return proto.EnumName(ServerType_name, int32(x)) +} + +type RpcType int32 + +const ( + RpcType_UNARY RpcType = 0 + RpcType_STREAMING RpcType = 1 +) + +var RpcType_name = map[int32]string{ + 0: "UNARY", + 1: "STREAMING", +} +var RpcType_value = map[string]int32{ + "UNARY": 0, + "STREAMING": 1, +} + +func (x RpcType) String() string { + return proto.EnumName(RpcType_name, int32(x)) +} + +type StatsRequest struct { + // run number + TestNum int32 `protobuf:"varint,1,opt,name=test_num" json:"test_num,omitempty"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} + +type ServerStats struct { + // wall clock time + TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed" json:"time_elapsed,omitempty"` + // user time used by the server process and threads + TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user" json:"time_user,omitempty"` + // server time used by the server process and all threads + TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system" json:"time_system,omitempty"` +} + +func (m *ServerStats) Reset() { *m = ServerStats{} } +func (m *ServerStats) String() string { return proto.CompactTextString(m) } +func (*ServerStats) ProtoMessage() {} + +type Payload struct { + // The type of data in body. + Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. 
+ Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} + +type HistogramData struct { + Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` + MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen" json:"min_seen,omitempty"` + MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen" json:"max_seen,omitempty"` + Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` + SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares" json:"sum_of_squares,omitempty"` + Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` +} + +func (m *HistogramData) Reset() { *m = HistogramData{} } +func (m *HistogramData) String() string { return proto.CompactTextString(m) } +func (*HistogramData) ProtoMessage() {} + +type ClientConfig struct { + ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets" json:"server_targets,omitempty"` + ClientType ClientType `protobuf:"varint,2,opt,name=client_type,enum=grpc.testing.ClientType" json:"client_type,omitempty"` + EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` + OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel" json:"outstanding_rpcs_per_channel,omitempty"` + ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels" json:"client_channels,omitempty"` + PayloadSize int32 `protobuf:"varint,6,opt,name=payload_size" json:"payload_size,omitempty"` + // only for async client: + AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads" json:"async_client_threads,omitempty"` + RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` +} + +func (m *ClientConfig) Reset() { *m = ClientConfig{} } +func (m *ClientConfig) String() string { return proto.CompactTextString(m) } +func (*ClientConfig) ProtoMessage() {} + +// Request current stats +type Mark struct { +} + +func (m *Mark) Reset() { *m = Mark{} } +func (m *Mark) String() string { return proto.CompactTextString(m) } +func (*Mark) ProtoMessage() {} + +type ClientArgs struct { + Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` + Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` +} + +func (m *ClientArgs) Reset() { *m = ClientArgs{} } +func (m *ClientArgs) String() string { return proto.CompactTextString(m) } +func (*ClientArgs) ProtoMessage() {} + +func (m *ClientArgs) GetSetup() *ClientConfig { + if m != nil { + return m.Setup + } + return nil +} + +func (m *ClientArgs) GetMark() *Mark { + if m != nil { + return m.Mark + } + return nil +} + +type ClientStats struct { + Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` + TimeElapsed float64 `protobuf:"fixed64,3,opt,name=time_elapsed" json:"time_elapsed,omitempty"` + TimeUser float64 `protobuf:"fixed64,4,opt,name=time_user" json:"time_user,omitempty"` + TimeSystem float64 `protobuf:"fixed64,5,opt,name=time_system" json:"time_system,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} + +func (m *ClientStats) GetLatencies() *HistogramData { + if m != nil { + return m.Latencies + } + return nil +} + +type ClientStatus struct { + Stats *ClientStats 
`protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ClientStatus) Reset() { *m = ClientStatus{} } +func (m *ClientStatus) String() string { return proto.CompactTextString(m) } +func (*ClientStatus) ProtoMessage() {} + +func (m *ClientStatus) GetStats() *ClientStats { + if m != nil { + return m.Stats + } + return nil +} + +type ServerConfig struct { + ServerType ServerType `protobuf:"varint,1,opt,name=server_type,enum=grpc.testing.ServerType" json:"server_type,omitempty"` + Threads int32 `protobuf:"varint,2,opt,name=threads" json:"threads,omitempty"` + EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} + +type ServerArgs struct { + Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` + Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` +} + +func (m *ServerArgs) Reset() { *m = ServerArgs{} } +func (m *ServerArgs) String() string { return proto.CompactTextString(m) } +func (*ServerArgs) ProtoMessage() {} + +func (m *ServerArgs) GetSetup() *ServerConfig { + if m != nil { + return m.Setup + } + return nil +} + +func (m *ServerArgs) GetMark() *Mark { + if m != nil { + return m.Mark + } + return nil +} + +type ServerStatus struct { + Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` +} + +func (m *ServerStatus) Reset() { *m = ServerStatus{} } +func (m *ServerStatus) String() string { return proto.CompactTextString(m) } +func (*ServerStatus) ProtoMessage() {} + +func (m *ServerStatus) GetStats() *ServerStats { + if m != nil { + return m.Stats + } + return nil +} + +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` + // Optional input payload sent along with the request. 
+ Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +type SimpleResponse struct { + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) + proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) + proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) + proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) +} + +// Client API for TestService service + +type TestServiceClient interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. + StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingCallClient{stream} + return x, nil +} + +type TestService_StreamingCallClient interface { + Send(*SimpleRequest) error + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceStreamingCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceStreamingCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ StreamingCall(TestService_StreamingCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(SimpleRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(TestServiceServer).UnaryCall(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _TestService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingCall(&testServiceStreamingCallServer{stream}) +} + +type TestService_StreamingCallServer interface { + Send(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type testServiceStreamingCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceStreamingCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingCall", + Handler: _TestService_StreamingCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} + +// Client API for Worker service + +type WorkerClient interface { + // Start test with specified workload + RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) + // Start test with specified workload + RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) +} + +type workerClient struct { + cc *grpc.ClientConn +} + +func NewWorkerClient(cc *grpc.ClientConn) WorkerClient { + return &workerClient{cc} +} + +func (c *workerClient) RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[0], c.cc, "/grpc.testing.Worker/RunTest", opts...) + if err != nil { + return nil, err + } + x := &workerRunTestClient{stream} + return x, nil +} + +type Worker_RunTestClient interface { + Send(*ClientArgs) error + Recv() (*ClientStatus, error) + grpc.ClientStream +} + +type workerRunTestClient struct { + grpc.ClientStream +} + +func (x *workerRunTestClient) Send(m *ClientArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerRunTestClient) Recv() (*ClientStatus, error) { + m := new(ClientStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[1], c.cc, "/grpc.testing.Worker/RunServer", opts...) 
+ if err != nil { + return nil, err + } + x := &workerRunServerClient{stream} + return x, nil +} + +type Worker_RunServerClient interface { + Send(*ServerArgs) error + Recv() (*ServerStatus, error) + grpc.ClientStream +} + +type workerRunServerClient struct { + grpc.ClientStream +} + +func (x *workerRunServerClient) Send(m *ServerArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerRunServerClient) Recv() (*ServerStatus, error) { + m := new(ServerStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Worker service + +type WorkerServer interface { + // Start test with specified workload + RunTest(Worker_RunTestServer) error + // Start test with specified workload + RunServer(Worker_RunServerServer) error +} + +func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { + s.RegisterService(&_Worker_serviceDesc, srv) +} + +func _Worker_RunTest_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServer).RunTest(&workerRunTestServer{stream}) +} + +type Worker_RunTestServer interface { + Send(*ClientStatus) error + Recv() (*ClientArgs, error) + grpc.ServerStream +} + +type workerRunTestServer struct { + grpc.ServerStream +} + +func (x *workerRunTestServer) Send(m *ClientStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerRunTestServer) Recv() (*ClientArgs, error) { + m := new(ClientArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Worker_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServer).RunServer(&workerRunServerServer{stream}) +} + +type Worker_RunServerServer interface { + Send(*ServerStatus) error + Recv() (*ServerArgs, error) + grpc.ServerStream +} + +type workerRunServerServer struct { + grpc.ServerStream +} + +func (x *workerRunServerServer) Send(m *ServerStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerRunServerServer) Recv() (*ServerArgs, error) { + m := new(ServerArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Worker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.Worker", + HandlerType: (*WorkerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "RunTest", + Handler: _Worker_RunTest_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "RunServer", + Handler: _Worker_RunServer_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto new file mode 100644 index 000000000..e3a27f861 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto @@ -0,0 +1,148 @@ +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto3"; + +package grpc.testing; + +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. 
+ RANDOM = 2; +} + +message StatsRequest { + // run number + optional int32 test_num = 1; +} + +message ServerStats { + // wall clock time + double time_elapsed = 1; + + // user time used by the server process and threads + double time_user = 2; + + // server time used by the server process and all threads + double time_system = 3; +} + +message Payload { + // The type of data in body. + PayloadType type = 1; + // Primary contents of payload. + bytes body = 2; +} + +message HistogramData { + repeated uint32 bucket = 1; + double min_seen = 2; + double max_seen = 3; + double sum = 4; + double sum_of_squares = 5; + double count = 6; +} + +enum ClientType { + SYNCHRONOUS_CLIENT = 0; + ASYNC_CLIENT = 1; +} + +enum ServerType { + SYNCHRONOUS_SERVER = 0; + ASYNC_SERVER = 1; +} + +enum RpcType { + UNARY = 0; + STREAMING = 1; +} + +message ClientConfig { + repeated string server_targets = 1; + ClientType client_type = 2; + bool enable_ssl = 3; + int32 outstanding_rpcs_per_channel = 4; + int32 client_channels = 5; + int32 payload_size = 6; + // only for async client: + int32 async_client_threads = 7; + RpcType rpc_type = 8; +} + +// Request current stats +message Mark {} + +message ClientArgs { + oneof argtype { + ClientConfig setup = 1; + Mark mark = 2; + } +} + +message ClientStats { + HistogramData latencies = 1; + double time_elapsed = 3; + double time_user = 4; + double time_system = 5; +} + +message ClientStatus { + ClientStats stats = 1; +} + +message ServerConfig { + ServerType server_type = 1; + int32 threads = 2; + bool enable_ssl = 3; +} + +message ServerArgs { + oneof argtype { + ServerConfig setup = 1; + Mark mark = 2; + } +} + +message ServerStatus { + ServerStats stats = 1; + int32 port = 2; +} + +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + int32 response_size = 2; + + // Optional input payload sent along with the request. + Payload payload = 3; +} + +message SimpleResponse { + Payload payload = 1; +} + +service TestService { + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by one response. + // The server returns the client payload as-is. 
+ rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); +} + +service Worker { + // Start test with specified workload + rpc RunTest(stream ClientArgs) returns (stream ClientStatus); + // Start test with specified workload + rpc RunServer(stream ServerArgs) returns (stream ServerStatus); +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go new file mode 100644 index 000000000..090f002fa --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "time" + + "google.golang.org/grpc/benchmark" + "google.golang.org/grpc/grpclog" +) + +var ( + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server") +) + +func main() { + flag.Parse() + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Server profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + addr, stopper := benchmark.StartServer(":0") // listen on all interfaces + grpclog.Println("Server Address: ", addr) + <-time.After(time.Duration(*duration) * time.Second) + stopper() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/counter.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/counter.go new file mode 100644 index 000000000..4389bae38 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/counter.go @@ -0,0 +1,135 @@ +package stats + +import ( + "sync" + "time" +) + +var ( + // TimeNow is used for testing. + TimeNow = time.Now +) + +const ( + hour = 0 + tenminutes = 1 + minute = 2 +) + +// Counter is a counter that keeps track of its recent values over a given +// period of time, and with a given resolution. Use newCounter() to instantiate. +type Counter struct { + mu sync.RWMutex + ts [3]*timeseries + lastUpdate time.Time +} + +// newCounter returns a new Counter. +func newCounter() *Counter { + now := TimeNow() + c := &Counter{} + c.ts[hour] = newTimeSeries(now, time.Hour, time.Minute) + c.ts[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second) + c.ts[minute] = newTimeSeries(now, time.Minute, time.Second) + return c +} + +func (c *Counter) advance() time.Time { + now := TimeNow() + for _, ts := range c.ts { + ts.advanceTime(now) + } + return now +} + +// Value returns the current value of the counter. +func (c *Counter) Value() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + return c.ts[minute].headValue() +} + +// LastUpdate returns the last update time of the counter. +func (c *Counter) LastUpdate() time.Time { + c.mu.RLock() + defer c.mu.RUnlock() + return c.lastUpdate +} + +// Set updates the current value of the counter. +func (c *Counter) Set(value int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.lastUpdate = c.advance() + for _, ts := range c.ts { + ts.set(value) + } +} + +// Incr increments the current value of the counter by 'delta'. +func (c *Counter) Incr(delta int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.lastUpdate = c.advance() + for _, ts := range c.ts { + ts.incr(delta) + } +} + +// Delta1h returns the delta for the last hour. 
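+// Roughly speaking, a counter that was just created (or Reset) and has since
+// seen a single Incr(3) reports a delta of 3 over each window; the precise
+// behavior depends on the timeseries resolutions configured in newCounter.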
+func (c *Counter) Delta1h() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + c.advance() + return c.ts[hour].delta() +} + +// Delta10m returns the delta for the last 10 minutes. +func (c *Counter) Delta10m() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + c.advance() + return c.ts[tenminutes].delta() +} + +// Delta1m returns the delta for the last minute. +func (c *Counter) Delta1m() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + c.advance() + return c.ts[minute].delta() +} + +// Rate1h returns the rate of change of the counter in the last hour. +func (c *Counter) Rate1h() float64 { + c.mu.RLock() + defer c.mu.RUnlock() + c.advance() + return c.ts[hour].rate() +} + +// Rate10m returns the rate of change of the counter in the last 10 minutes. +func (c *Counter) Rate10m() float64 { + c.mu.RLock() + defer c.mu.RUnlock() + c.advance() + return c.ts[tenminutes].rate() +} + +// Rate1m returns the rate of change of the counter in the last minute. +func (c *Counter) Rate1m() float64 { + c.mu.RLock() + defer c.mu.RUnlock() + c.advance() + return c.ts[minute].rate() +} + +// Reset resets the counter to an empty state. +func (c *Counter) Reset() { + c.mu.Lock() + defer c.mu.Unlock() + now := TimeNow() + for _, ts := range c.ts { + ts.reset(now) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go new file mode 100644 index 000000000..727808c8b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go @@ -0,0 +1,255 @@ +package stats + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// HistogramValue is the value of Histogram objects. +type HistogramValue struct { + // Count is the total number of values added to the histogram. + Count int64 + // Sum is the sum of all the values added to the histogram. + Sum int64 + // Min is the minimum of all the values added to the histogram. + Min int64 + // Max is the maximum of all the values added to the histogram. + Max int64 + // Buckets contains all the buckets of the histogram. + Buckets []HistogramBucket +} + +// HistogramBucket is one histogram bucket. +type HistogramBucket struct { + // LowBound is the lower bound of the bucket. + LowBound int64 + // Count is the number of values in the bucket. + Count int64 +} + +// Print writes textual output of the histogram values. +func (v HistogramValue) Print(w io.Writer) { + avg := float64(v.Sum) / float64(v.Count) + fmt.Fprintf(w, "Count: %d Min: %d Max: %d Avg: %.2f\n", v.Count, v.Min, v.Max, avg) + fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60)) + if v.Count <= 0 { + return + } + + maxBucketDigitLen := len(strconv.FormatInt(v.Buckets[len(v.Buckets)-1].LowBound, 10)) + if maxBucketDigitLen < 3 { + // For "inf". 
+		maxBucketDigitLen = 3
+	}
+	maxCountDigitLen := len(strconv.FormatInt(v.Count, 10))
+	percentMulti := 100 / float64(v.Count)
+
+	accCount := int64(0)
+	for i, b := range v.Buckets {
+		fmt.Fprintf(w, "[%*d, ", maxBucketDigitLen, b.LowBound)
+		if i+1 < len(v.Buckets) {
+			fmt.Fprintf(w, "%*d)", maxBucketDigitLen, v.Buckets[i+1].LowBound)
+		} else {
+			fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf")
+		}
+
+		accCount += b.Count
+		fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti)
+
+		const barScale = 0.1
+		barLength := int(float64(b.Count)*percentMulti*barScale + 0.5)
+		fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength))
+	}
+}
+
+// String returns the textual output of the histogram values as string.
+func (v HistogramValue) String() string {
+	var b bytes.Buffer
+	v.Print(&b)
+	return b.String()
+}
+
+// A Histogram accumulates values in the form of a histogram. The type of the
+// values is int64, which is suitable for keeping track of things like RPC
+// latency in milliseconds. New histogram objects should be obtained via the
+// NewHistogram() function.
+type Histogram struct {
+	opts    HistogramOptions
+	buckets []bucketInternal
+	count   *Counter
+	sum     *Counter
+	tracker *Tracker
+}
+
+// HistogramOptions contains the parameters that define the histogram's buckets.
+type HistogramOptions struct {
+	// NumBuckets is the number of buckets.
+	NumBuckets int
+	// GrowthFactor is the growth factor of the buckets. A value of 0.1
+	// indicates that bucket N+1 will be 10% larger than bucket N.
+	GrowthFactor float64
+	// SmallestBucketSize is the size of the first bucket. Bucket sizes are
+	// rounded down to the nearest integer.
+	SmallestBucketSize float64
+	// MinValue is the lower bound of the first bucket.
+	MinValue int64
+}
+
+// bucketInternal is the internal representation of a bucket, which includes a
+// rate counter.
+type bucketInternal struct {
+	lowBound int64
+	count    *Counter
+}
+
+// NewHistogram returns a pointer to a new Histogram object that was created
+// with the provided options.
+func NewHistogram(opts HistogramOptions) *Histogram {
+	if opts.NumBuckets == 0 {
+		opts.NumBuckets = 32
+	}
+	if opts.SmallestBucketSize == 0.0 {
+		opts.SmallestBucketSize = 1.0
+	}
+	h := Histogram{
+		opts:    opts,
+		buckets: make([]bucketInternal, opts.NumBuckets),
+		count:   newCounter(),
+		sum:     newCounter(),
+		tracker: newTracker(),
+	}
+	low := opts.MinValue
+	delta := opts.SmallestBucketSize
+	for i := 0; i < opts.NumBuckets; i++ {
+		h.buckets[i].lowBound = low
+		h.buckets[i].count = newCounter()
+		low = low + int64(delta)
+		delta = delta * (1.0 + opts.GrowthFactor)
+	}
+	return &h
+}
+
+// Opts returns a copy of the options used to create the Histogram.
+func (h *Histogram) Opts() HistogramOptions {
+	return h.opts
+}
+
+// Add adds a value to the histogram.
+func (h *Histogram) Add(value int64) error {
+	bucket, err := h.findBucket(value)
+	if err != nil {
+		return err
+	}
+	h.buckets[bucket].count.Incr(1)
+	h.count.Incr(1)
+	h.sum.Incr(value)
+	h.tracker.Push(value)
+	return nil
+}
+
+// LastUpdate returns the time at which the object was last updated.
+func (h *Histogram) LastUpdate() time.Time {
+	return h.count.LastUpdate()
+}
+
+// Value returns the accumulated state of the histogram since it was created.
+func (h *Histogram) Value() HistogramValue {
+	b := make([]HistogramBucket, len(h.buckets))
+	for i, v := range h.buckets {
+		b[i] = HistogramBucket{
+			LowBound: v.lowBound,
+			Count:    v.count.Value(),
+		}
+	}
+
+	v := HistogramValue{
+		Count:   h.count.Value(),
+		Sum:     h.sum.Value(),
+		Min:     h.tracker.Min(),
+		Max:     h.tracker.Max(),
+		Buckets: b,
+	}
+	return v
+}
+
+// Delta1h returns the change in the last hour.
+func (h *Histogram) Delta1h() HistogramValue {
+	b := make([]HistogramBucket, len(h.buckets))
+	for i, v := range h.buckets {
+		b[i] = HistogramBucket{
+			LowBound: v.lowBound,
+			Count:    v.count.Delta1h(),
+		}
+	}
+
+	v := HistogramValue{
+		Count:   h.count.Delta1h(),
+		Sum:     h.sum.Delta1h(),
+		Min:     h.tracker.Min1h(),
+		Max:     h.tracker.Max1h(),
+		Buckets: b,
+	}
+	return v
+}
+
+// Delta10m returns the change in the last 10 minutes.
+func (h *Histogram) Delta10m() HistogramValue {
+	b := make([]HistogramBucket, len(h.buckets))
+	for i, v := range h.buckets {
+		b[i] = HistogramBucket{
+			LowBound: v.lowBound,
+			Count:    v.count.Delta10m(),
+		}
+	}
+
+	v := HistogramValue{
+		Count:   h.count.Delta10m(),
+		Sum:     h.sum.Delta10m(),
+		Min:     h.tracker.Min10m(),
+		Max:     h.tracker.Max10m(),
+		Buckets: b,
+	}
+	return v
+}
+
+// Delta1m returns the change in the last minute.
+func (h *Histogram) Delta1m() HistogramValue {
+	b := make([]HistogramBucket, len(h.buckets))
+	for i, v := range h.buckets {
+		b[i] = HistogramBucket{
+			LowBound: v.lowBound,
+			Count:    v.count.Delta1m(),
+		}
+	}
+
+	v := HistogramValue{
+		Count:   h.count.Delta1m(),
+		Sum:     h.sum.Delta1m(),
+		Min:     h.tracker.Min1m(),
+		Max:     h.tracker.Max1m(),
+		Buckets: b,
+	}
+	return v
+}
+
+// findBucket does a binary search to find in which bucket the value goes.
+func (h *Histogram) findBucket(value int64) (int, error) {
+	lastBucket := len(h.buckets) - 1
+	min, max := 0, lastBucket
+	for max >= min {
+		b := (min + max) / 2
+		if value >= h.buckets[b].lowBound && (b == lastBucket || value < h.buckets[b+1].lowBound) {
+			return b, nil
+		}
+		if value < h.buckets[b].lowBound {
+			max = b - 1
+			continue
+		}
+		min = b + 1
+	}
+	return 0, fmt.Errorf("no bucket for value: %d", value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/stats.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/stats.go
new file mode 100644
index 000000000..4290ad77e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/stats.go
@@ -0,0 +1,116 @@
+package stats
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"time"
+)
+
+// Stats is a simple helper for gathering additional statistics, such as
+// histograms, during benchmarks. This is not thread safe.
+type Stats struct {
+	numBuckets int
+	unit       time.Duration
+	min, max   int64
+	histogram  *Histogram
+
+	durations durationSlice
+	dirty     bool
+}
+
+type durationSlice []time.Duration
+
+// NewStats creates a new Stats instance. If numBuckets is not positive,
+// the default value (16) will be used.
+func NewStats(numBuckets int) *Stats {
+	if numBuckets <= 0 {
+		numBuckets = 16
+	}
+	return &Stats{
+		// Use one more bucket for the last unbounded bucket.
+		numBuckets: numBuckets + 1,
+		durations:  make(durationSlice, 0, 100000),
+	}
+}
+
+// Add adds an elapsed time per operation to the stats.
+func (stats *Stats) Add(d time.Duration) {
+	stats.durations = append(stats.durations, d)
+	stats.dirty = true
+}
+
+// Clear resets the stats, removing all values.
+func (stats *Stats) Clear() { + stats.durations = stats.durations[:0] + stats.histogram = nil + stats.dirty = false +} + +// maybeUpdate updates internal stat data if there was any newly added +// stats since this was updated. +func (stats *Stats) maybeUpdate() { + if !stats.dirty { + return + } + + stats.min = math.MaxInt64 + stats.max = 0 + for _, d := range stats.durations { + if stats.min > int64(d) { + stats.min = int64(d) + } + if stats.max < int64(d) { + stats.max = int64(d) + } + } + + // Use the largest unit that can represent the minimum time duration. + stats.unit = time.Nanosecond + for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} { + if stats.min <= int64(u) { + break + } + stats.unit = u + } + + // Adjust the min/max according to the new unit. + stats.min /= int64(stats.unit) + stats.max /= int64(stats.unit) + numBuckets := stats.numBuckets + if n := int(stats.max - stats.min + 1); n < numBuckets { + numBuckets = n + } + stats.histogram = NewHistogram(HistogramOptions{ + NumBuckets: numBuckets, + // max(i.e., Nth lower bound) = min + (1 + growthFactor)^(numBuckets-2). + GrowthFactor: math.Pow(float64(stats.max-stats.min), 1/float64(stats.numBuckets-2)) - 1, + SmallestBucketSize: 1.0, + MinValue: stats.min}) + + for _, d := range stats.durations { + stats.histogram.Add(int64(d / stats.unit)) + } + + stats.dirty = false +} + +// Print writes textual output of the Stats. +func (stats *Stats) Print(w io.Writer) { + stats.maybeUpdate() + + if stats.histogram == nil { + fmt.Fprint(w, "Histogram (empty)\n") + } else { + fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:]) + stats.histogram.Value().Print(w) + } +} + +// String returns the textual output of the Stats as string. +func (stats *Stats) String() string { + var b bytes.Buffer + stats.Print(&b) + return b.String() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/timeseries.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/timeseries.go new file mode 100644 index 000000000..2ba18a4d4 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/timeseries.go @@ -0,0 +1,154 @@ +package stats + +import ( + "math" + "time" +) + +// timeseries holds the history of a changing value over a predefined period of +// time. +type timeseries struct { + size int // The number of time slots. Equivalent to len(slots). + resolution time.Duration // The time resolution of each slot. + stepCount int64 // The number of intervals seen since creation. + head int // The position of the current time in slots. + time time.Time // The time at the beginning of the current time slot. + slots []int64 // A circular buffer of time slots. +} + +// newTimeSeries returns a newly allocated timeseries that covers the requested +// period with the given resolution. +func newTimeSeries(initialTime time.Time, period, resolution time.Duration) *timeseries { + size := int(period.Nanoseconds()/resolution.Nanoseconds()) + 1 + return ×eries{ + size: size, + resolution: resolution, + stepCount: 1, + time: initialTime, + slots: make([]int64, size), + } +} + +// advanceTimeWithFill moves the timeseries forward to time t and fills in any +// slots that get skipped in the process with the given value. Values older than +// the timeseries period are lost. 
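+//
+// For example (a sketch, assuming a one-second resolution): if the clock jumps
+// three seconds ahead, the three slots entered along the way, including the
+// new head, are all written with the fill value. Counter advances with the old
+// head value as the fill, so an idle counter keeps its value, while Tracker
+// fills with math.MaxInt64/math.MinInt64 so that stale minima and maxima
+// expire.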
+func (ts *timeseries) advanceTimeWithFill(t time.Time, value int64) {
+	advanceTo := t.Truncate(ts.resolution)
+	if !advanceTo.After(ts.time) {
+		// This is a shortcut for the most common case of a busy counter
+		// where updates come in many times per ts.resolution.
+		ts.time = advanceTo
+		return
+	}
+	steps := int(advanceTo.Sub(ts.time).Nanoseconds() / ts.resolution.Nanoseconds())
+	ts.stepCount += int64(steps)
+	if steps > ts.size {
+		steps = ts.size
+	}
+	for steps > 0 {
+		ts.head = (ts.head + 1) % ts.size
+		ts.slots[ts.head] = value
+		steps--
+	}
+	ts.time = advanceTo
+}
+
+// advanceTime moves the timeseries forward to time t and fills in any slots
+// that get skipped in the process with the head value. Values older than the
+// timeseries period are lost.
+func (ts *timeseries) advanceTime(t time.Time) {
+	ts.advanceTimeWithFill(t, ts.slots[ts.head])
+}
+
+// set sets the current value of the timeseries.
+func (ts *timeseries) set(value int64) {
+	ts.slots[ts.head] = value
+}
+
+// incr increments the current value of the timeseries by 'delta'.
+func (ts *timeseries) incr(delta int64) {
+	ts.slots[ts.head] += delta
+}
+
+// headValue returns the latest value from the timeseries.
+func (ts *timeseries) headValue() int64 {
+	return ts.slots[ts.head]
+}
+
+// headTime returns the time of the latest value from the timeseries.
+func (ts *timeseries) headTime() time.Time {
+	return ts.time
+}
+
+// tailValue returns the oldest value from the timeseries.
+func (ts *timeseries) tailValue() int64 {
+	if ts.stepCount < int64(ts.size) {
+		return 0
+	}
+	return ts.slots[(ts.head+1)%ts.size]
+}
+
+// tailTime returns the time of the oldest value from the timeseries.
+func (ts *timeseries) tailTime() time.Time {
+	size := int64(ts.size)
+	if ts.stepCount < size {
+		size = ts.stepCount
+	}
+	return ts.time.Add(-time.Duration(size-1) * ts.resolution)
+}
+
+// delta returns the difference between the newest and oldest values from the
+// timeseries.
+func (ts *timeseries) delta() int64 {
+	return ts.headValue() - ts.tailValue()
+}
+
+// rate returns the rate of change between the oldest and newest values from
+// the timeseries in units per second.
+func (ts *timeseries) rate() float64 {
+	deltaTime := ts.headTime().Sub(ts.tailTime()).Seconds()
+	if deltaTime == 0 {
+		return 0
+	}
+	return float64(ts.delta()) / deltaTime
+}
+
+// min returns the smallest value from the timeseries.
+func (ts *timeseries) min() int64 {
+	to := ts.size
+	if ts.stepCount < int64(ts.size) {
+		to = ts.head + 1
+	}
+	tail := (ts.head + 1) % ts.size
+	min := int64(math.MaxInt64)
+	for b := 0; b < to; b++ {
+		if b != tail && ts.slots[b] < min {
+			min = ts.slots[b]
+		}
+	}
+	return min
+}
+
+// max returns the largest value from the timeseries.
+func (ts *timeseries) max() int64 {
+	to := ts.size
+	if ts.stepCount < int64(ts.size) {
+		to = ts.head + 1
+	}
+	tail := (ts.head + 1) % ts.size
+	max := int64(math.MinInt64)
+	for b := 0; b < to; b++ {
+		if b != tail && ts.slots[b] > max {
+			max = ts.slots[b]
+		}
+	}
+	return max
+}
+
+// reset resets the timeseries to an empty state.
+func (ts *timeseries) reset(t time.Time) { + ts.head = 0 + ts.time = t + ts.stepCount = 1 + ts.slots = make([]int64, ts.size) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/tracker.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/tracker.go new file mode 100644 index 000000000..802f72957 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/tracker.go @@ -0,0 +1,159 @@ +package stats + +import ( + "math" + "sync" + "time" +) + +// Tracker is a min/max value tracker that keeps track of its min/max values +// over a given period of time, and with a given resolution. The initial min +// and max values are math.MaxInt64 and math.MinInt64 respectively. +type Tracker struct { + mu sync.RWMutex + min, max int64 // All time min/max. + minTS, maxTS [3]*timeseries + lastUpdate time.Time +} + +// newTracker returns a new Tracker. +func newTracker() *Tracker { + now := TimeNow() + t := &Tracker{} + t.minTS[hour] = newTimeSeries(now, time.Hour, time.Minute) + t.minTS[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second) + t.minTS[minute] = newTimeSeries(now, time.Minute, time.Second) + t.maxTS[hour] = newTimeSeries(now, time.Hour, time.Minute) + t.maxTS[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second) + t.maxTS[minute] = newTimeSeries(now, time.Minute, time.Second) + t.init() + return t +} + +func (t *Tracker) init() { + t.min = math.MaxInt64 + t.max = math.MinInt64 + for _, ts := range t.minTS { + ts.set(math.MaxInt64) + } + for _, ts := range t.maxTS { + ts.set(math.MinInt64) + } +} + +func (t *Tracker) advance() time.Time { + now := TimeNow() + for _, ts := range t.minTS { + ts.advanceTimeWithFill(now, math.MaxInt64) + } + for _, ts := range t.maxTS { + ts.advanceTimeWithFill(now, math.MinInt64) + } + return now +} + +// LastUpdate returns the last update time of the range. +func (t *Tracker) LastUpdate() time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + return t.lastUpdate +} + +// Push adds a new value if it is a new minimum or maximum. +func (t *Tracker) Push(value int64) { + t.mu.Lock() + defer t.mu.Unlock() + t.lastUpdate = t.advance() + if t.min > value { + t.min = value + } + if t.max < value { + t.max = value + } + for _, ts := range t.minTS { + if ts.headValue() > value { + ts.set(value) + } + } + for _, ts := range t.maxTS { + if ts.headValue() < value { + ts.set(value) + } + } +} + +// Min returns the minimum value of the tracker +func (t *Tracker) Min() int64 { + t.mu.RLock() + defer t.mu.RUnlock() + return t.min +} + +// Max returns the maximum value of the tracker. +func (t *Tracker) Max() int64 { + t.mu.RLock() + defer t.mu.RUnlock() + return t.max +} + +// Min1h returns the minimum value for the last hour. +func (t *Tracker) Min1h() int64 { + t.mu.Lock() + defer t.mu.Unlock() + t.advance() + return t.minTS[hour].min() +} + +// Max1h returns the maximum value for the last hour. +func (t *Tracker) Max1h() int64 { + t.mu.Lock() + defer t.mu.Unlock() + t.advance() + return t.maxTS[hour].max() +} + +// Min10m returns the minimum value for the last 10 minutes. +func (t *Tracker) Min10m() int64 { + t.mu.Lock() + defer t.mu.Unlock() + t.advance() + return t.minTS[tenminutes].min() +} + +// Max10m returns the maximum value for the last 10 minutes. +func (t *Tracker) Max10m() int64 { + t.mu.Lock() + defer t.mu.Unlock() + t.advance() + return t.maxTS[tenminutes].max() +} + +// Min1m returns the minimum value for the last 1 minute. 
+func (t *Tracker) Min1m() int64 { + t.mu.Lock() + defer t.mu.Unlock() + t.advance() + return t.minTS[minute].min() +} + +// Max1m returns the maximum value for the last 1 minute. +func (t *Tracker) Max1m() int64 { + t.mu.Lock() + defer t.mu.Unlock() + t.advance() + return t.maxTS[minute].max() +} + +// Reset resets the range to an empty state. +func (t *Tracker) Reset() { + t.mu.Lock() + defer t.mu.Unlock() + now := TimeNow() + for _, ts := range t.minTS { + ts.reset(now) + } + for _, ts := range t.maxTS { + ts.reset(now) + } + t.init() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/util.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/util.go new file mode 100644 index 000000000..a9922f985 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/util.go @@ -0,0 +1,191 @@ +package stats + +import ( + "bufio" + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "testing" +) + +var ( + curB *testing.B + curBenchName string + curStats map[string]*Stats + + orgStdout *os.File + nextOutPos int + + injectCond *sync.Cond + injectDone chan struct{} +) + +// AddStats adds a new unnamed Stats instance to the current benchmark. You need +// to run benchmarks by calling RunTestMain() to inject the stats to the +// benchmark results. If numBuckets is not positive, the default value (16) will +// be used. Please note that this calls b.ResetTimer() since it may be blocked +// until the previous benchmark stats is printed out. So AddStats() should +// typically be called at the very beginning of each benchmark function. +func AddStats(b *testing.B, numBuckets int) *Stats { + return AddStatsWithName(b, "", numBuckets) +} + +// AddStatsWithName adds a new named Stats instance to the current benchmark. +// With this, you can add multiple stats in a single benchmark. You need +// to run benchmarks by calling RunTestMain() to inject the stats to the +// benchmark results. If numBuckets is not positive, the default value (16) will +// be used. Please note that this calls b.ResetTimer() since it may be blocked +// until the previous benchmark stats is printed out. So AddStatsWithName() +// should typically be called at the very beginning of each benchmark function. +func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats { + var benchName string + for i := 1; ; i++ { + pc, _, _, ok := runtime.Caller(i) + if !ok { + panic("benchmark function not found") + } + p := strings.Split(runtime.FuncForPC(pc).Name(), ".") + benchName = p[len(p)-1] + if strings.HasPrefix(benchName, "Benchmark") { + break + } + } + procs := runtime.GOMAXPROCS(-1) + if procs != 1 { + benchName = fmt.Sprintf("%s-%d", benchName, procs) + } + + stats := NewStats(numBuckets) + + if injectCond != nil { + // We need to wait until the previous benchmark stats is printed out. + injectCond.L.Lock() + for curB != nil && curBenchName != benchName { + injectCond.Wait() + } + + curB = b + curBenchName = benchName + curStats[name] = stats + + injectCond.L.Unlock() + } + + b.ResetTimer() + return stats +} + +// RunTestMain runs the tests with enabling injection of benchmark stats. It +// returns an exit code to pass to os.Exit. +func RunTestMain(m *testing.M) int { + startStatsInjector() + defer stopStatsInjector() + return m.Run() +} + +// startStatsInjector starts stats injection to benchmark results. 
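+// It does so by replacing os.Stdout with a pipe: the injector goroutine scans
+// everything the tests print, forwards it to the original stdout, and, when a
+// line starting with the current benchmark name goes by, appends the recorded
+// histograms right after it (see injectStatsIfFinished below).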
+func startStatsInjector() { + orgStdout = os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + nextOutPos = 0 + + resetCurBenchStats() + + injectCond = sync.NewCond(&sync.Mutex{}) + injectDone = make(chan struct{}) + go func() { + defer close(injectDone) + + scanner := bufio.NewScanner(r) + scanner.Split(splitLines) + for scanner.Scan() { + injectStatsIfFinished(scanner.Text()) + } + if err := scanner.Err(); err != nil { + panic(err) + } + }() +} + +// stopStatsInjector stops stats injection and restores os.Stdout. +func stopStatsInjector() { + os.Stdout.Close() + <-injectDone + injectCond = nil + os.Stdout = orgStdout +} + +// splitLines is a split function for a bufio.Scanner that returns each line +// of text, teeing texts to the original stdout even before each line ends. +func splitLines(data []byte, eof bool) (advance int, token []byte, err error) { + if eof && len(data) == 0 { + return 0, nil, nil + } + + if i := bytes.IndexByte(data, '\n'); i >= 0 { + orgStdout.Write(data[nextOutPos : i+1]) + nextOutPos = 0 + return i + 1, data[0:i], nil + } + + orgStdout.Write(data[nextOutPos:]) + nextOutPos = len(data) + + if eof { + // This is a final, non-terminated line. Return it. + return len(data), data, nil + } + + return 0, nil, nil +} + +// injectStatsIfFinished prints out the stats if the current benchmark finishes. +func injectStatsIfFinished(line string) { + injectCond.L.Lock() + defer injectCond.L.Unlock() + + // We assume that the benchmark results start with the benchmark name. + if curB == nil || !strings.HasPrefix(line, curBenchName) { + return + } + + if !curB.Failed() { + // Output all stats in alphabetical order. + names := make([]string, 0, len(curStats)) + for name := range curStats { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + stats := curStats[name] + // The output of stats starts with a header like "Histogram (unit: ms)" + // followed by statistical properties and the buckets. Add the stats name + // if it is a named stats and indent them as Go testing outputs. + lines := strings.Split(stats.String(), "\n") + if n := len(lines); n > 0 { + if name != "" { + name = ": " + name + } + fmt.Fprintf(orgStdout, "--- %s%s\n", lines[0], name) + for _, line := range lines[1 : n-1] { + fmt.Fprintf(orgStdout, "\t%s\n", line) + } + } + } + } + + resetCurBenchStats() + injectCond.Signal() +} + +// resetCurBenchStats resets the current benchmark stats. +func resetCurBenchStats() { + curB = nil + curBenchName = "" + curStats = make(map[string]*Stats) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/call.go b/Godeps/_workspace/src/google.golang.org/grpc/call.go new file mode 100644 index 000000000..63b7966c1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/call.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"io"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+// recvResponse receives and parses an RPC response.
+//
+// TODO(zhaoq): Check whether the received message sequence is valid.
+func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
+	// Try to acquire header metadata from the server if there is any.
+	var err error
+	c.headerMD, err = stream.Header()
+	if err != nil {
+		return err
+	}
+	p := &parser{s: stream}
+	for {
+		if err = recv(p, codec, reply); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+	}
+	c.trailerMD = stream.Trailer()
+	return nil
+}
+
+// sendRequest writes out various information of an RPC such as Context and Message.
+func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
+	stream, err := t.NewStream(ctx, callHdr)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			if _, ok := err.(transport.ConnectionError); !ok {
+				t.CloseStream(stream, err)
+			}
+		}
+	}()
+	// TODO(zhaoq): Support compression.
+	outBuf, err := encode(codec, args, compressionNone)
+	if err != nil {
+		return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+	}
+	err = t.Write(stream, outBuf, opts)
+	if err != nil {
+		return nil, err
+	}
+	// Sent successfully.
+	return stream, nil
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	failFast  bool
+	headerMD  metadata.MD
+	trailerMD metadata.MD
+	traceInfo traceInfo // in trace.go
+}
+
+// Invoke is called by the generated code. It sends the RPC request on the
+// wire and returns after response is received.
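+//
+// A minimal sketch of a direct call (the method name and messages here are
+// hypothetical; generated client stubs normally wrap this). req and reply are
+// pointers to generated proto messages:
+//
+//	err := Invoke(ctx, "/helloworld.Greeter/SayHello", req, reply, conn)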
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { + var c callInfo + for _, o := range opts { + if err := o.before(&c); err != nil { + return toRPCErr(err) + } + } + defer func() { + for _, o := range opts { + o.after(&c) + } + }() + + if EnableTracing { + c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + defer c.traceInfo.tr.Finish() + c.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) + // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. + defer func() { + if err != nil { + c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + c.traceInfo.tr.SetError() + } + }() + } + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + } + topts := &transport.Options{ + Last: true, + Delay: false, + } + var ( + ts int // track the transport sequence number + lastErr error // record the error that happened + ) + for { + var ( + err error + t transport.ClientTransport + stream *transport.Stream + ) + // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. + if lastErr != nil && c.failFast { + return toRPCErr(lastErr) + } + t, ts, err = cc.wait(ctx, ts) + if err != nil { + if lastErr != nil { + // This was a retry; return the error from the last attempt. + return toRPCErr(lastErr) + } + return toRPCErr(err) + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) + } + stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) + if err != nil { + if _, ok := err.(transport.ConnectionError); ok { + lastErr = err + continue + } + if lastErr != nil { + return toRPCErr(lastErr) + } + return toRPCErr(err) + } + // Receive the response + lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply) + if _, ok := lastErr.(transport.ConnectionError); ok { + continue + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) + } + t.CloseStream(stream, lastErr) + if lastErr != nil { + return toRPCErr(lastErr) + } + return Errorf(stream.StatusCode(), stream.StatusDesc()) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..95540e4d8 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go @@ -0,0 +1,460 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/transport"
+)
+
+var (
+	// ErrUnspecTarget indicates that the target address is unspecified.
+	ErrUnspecTarget = errors.New("grpc: target is unspecified")
+	// ErrNoTransportSecurity indicates that there is no transport security
+	// being set for ClientConn. Users should either set one or explicitly
+	// call the WithInsecure DialOption to disable security.
+	ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+	// ErrCredentialsMisuse indicates that users want to transmit security
+	// information (e.g., an oauth2 token), which requires a secure
+	// connection, on an insecure connection.
+	ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
+	// ErrClientConnClosing indicates that the operation is illegal because
+	// the session is closing.
+	ErrClientConnClosing = errors.New("grpc: the client connection is closing")
+	// ErrClientConnTimeout indicates that the connection could not be
+	// established or re-established within the specified timeout.
+	ErrClientConnTimeout = errors.New("grpc: timed out trying to connect")
+	// minimum time to give a connection to complete
+	minConnectTimeout = 20 * time.Second
+)
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+	codec    Codec
+	block    bool
+	insecure bool
+	copts    transport.ConnectOptions
+}
+
+// DialOption configures how we set up the connection.
+type DialOption func(*dialOptions)
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
+func WithCodec(c Codec) DialOption {
+	return func(o *dialOptions) {
+		o.codec = c
+	}
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until
+// the underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+	return func(o *dialOptions) {
+		o.block = true
+	}
+}
+
+// WithInsecure returns a DialOption which disables transport security for the
+// connection. Unless this is set, transport security is required (see
+// ErrNoTransportSecurity).
+func WithInsecure() DialOption {
+	return func(o *dialOptions) {
+		o.insecure = true
+	}
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL).
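+//
+// A minimal usage sketch (the CA file and address here are hypothetical),
+// using a helper from the credentials package:
+//
+//	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
+//	if err != nil {
+//		// handle error
+//	}
+//	conn, err := Dial("server.example.com:443", WithTransportCredentials(creds))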
+func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {
+	return func(o *dialOptions) {
+		o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+	}
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials that will
+// place auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.Credentials) DialOption {
+	return func(o *dialOptions) {
+		o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+	}
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a client connection.
+func WithTimeout(d time.Duration) DialOption {
+	return func(o *dialOptions) {
+		o.copts.Timeout = d
+	}
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
+func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {
+	return func(o *dialOptions) {
+		o.copts.Dialer = f
+	}
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
+func WithUserAgent(s string) DialOption {
+	return func(o *dialOptions) {
+		o.copts.UserAgent = s
+	}
+}
+
+// Dial creates a client connection to the given target.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	if target == "" {
+		return nil, ErrUnspecTarget
+	}
+	cc := &ClientConn{
+		target:       target,
+		shutdownChan: make(chan struct{}),
+	}
+	for _, opt := range opts {
+		opt(&cc.dopts)
+	}
+	if !cc.dopts.insecure {
+		var ok bool
+		for _, c := range cc.dopts.copts.AuthOptions {
+			if _, ok := c.(credentials.TransportAuthenticator); !ok {
+				continue
+			}
+			ok = true
+		}
+		if !ok {
+			return nil, ErrNoTransportSecurity
+		}
+	} else {
+		for _, c := range cc.dopts.copts.AuthOptions {
+			if c.RequireTransportSecurity() {
+				return nil, ErrCredentialsMisuse
+			}
+		}
+	}
+	colonPos := strings.LastIndex(target, ":")
+	if colonPos == -1 {
+		colonPos = len(target)
+	}
+	cc.authority = target[:colonPos]
+	if cc.dopts.codec == nil {
+		// Set the default codec.
+		cc.dopts.codec = protoCodec{}
+	}
+	cc.stateCV = sync.NewCond(&cc.mu)
+	if cc.dopts.block {
+		if err := cc.resetTransport(false); err != nil {
+			cc.Close()
+			return nil, err
+		}
+		// Start to monitor the error status of transport.
+		go cc.transportMonitor()
+	} else {
+		// Start a goroutine connecting to the server asynchronously.
+		go func() {
+			if err := cc.resetTransport(false); err != nil {
+				grpclog.Printf("Failed to dial %s: %v; please retry.", target, err)
+				cc.Close()
+				return
+			}
+			go cc.transportMonitor()
+		}()
+	}
+	return cc, nil
+}
+
+// ConnectivityState indicates the state of a client connection.
+type ConnectivityState int
+
+const (
+	// Idle indicates the ClientConn is idle.
+	Idle ConnectivityState = iota
+	// Connecting indicates the ClientConn is connecting.
+	Connecting
+	// Ready indicates the ClientConn is ready for work.
+	Ready
+	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+	TransientFailure
+	// Shutdown indicates the ClientConn has started shutting down.
+	Shutdown
+)
+
+func (s ConnectivityState) String() string {
+	switch s {
+	case Idle:
+		return "IDLE"
+	case Connecting:
+		return "CONNECTING"
+	case Ready:
+		return "READY"
+	case TransientFailure:
+		return "TRANSIENT_FAILURE"
+	case Shutdown:
+		return "SHUTDOWN"
+	default:
+		panic(fmt.Sprintf("unknown connectivity state: %d", s))
+	}
+}
+
+// ClientConn represents a client connection to an RPC service.
+type ClientConn struct { + target string + authority string + dopts dialOptions + shutdownChan chan struct{} + + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + // Every time a new transport is created, this is incremented by 1. Used + // to avoid trying to recreate a transport while the new one is already + // under construction. + transportSeq int + transport transport.ClientTransport +} + +// State returns the connectivity state of the ClientConn +func (cc *ClientConn) State() ConnectivityState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.state +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState +// or timeout fires. It returns false if timeout fires and true otherwise. +func (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { + start := time.Now() + cc.mu.Lock() + defer cc.mu.Unlock() + if sourceState != cc.state { + return true + } + expired := timeout <= time.Since(start) + if expired { + return false + } + done := make(chan struct{}) + go func() { + select { + case <-time.After(timeout - time.Since(start)): + cc.mu.Lock() + expired = true + cc.stateCV.Broadcast() + cc.mu.Unlock() + case <-done: + } + }() + defer close(done) + for sourceState == cc.state { + cc.stateCV.Wait() + if expired { + return false + } + } + return true +} + +func (cc *ClientConn) resetTransport(closeTransport bool) error { + var retries int + start := time.Now() + cc.mu.Lock() + ts := cc.transportSeq + // Avoid wait() picking up a dying transport unnecessarily. + cc.transportSeq = 0 + cc.mu.Unlock() + for { + cc.mu.Lock() + cc.state = Connecting + cc.stateCV.Broadcast() + t := cc.transport + if cc.state == Shutdown { + cc.mu.Unlock() + return ErrClientConnClosing + } + cc.mu.Unlock() + if closeTransport { + t.Close() + } + // Adjust timeout for the current try. + copts := cc.dopts.copts + if copts.Timeout < 0 { + cc.Close() + return ErrClientConnTimeout + } + if copts.Timeout > 0 { + copts.Timeout -= time.Since(start) + if copts.Timeout <= 0 { + cc.Close() + return ErrClientConnTimeout + } + } + sleepTime := backoff(retries) + timeout := sleepTime + if timeout < minConnectTimeout { + timeout = minConnectTimeout + } + if copts.Timeout == 0 || copts.Timeout > timeout { + copts.Timeout = timeout + } + connectTime := time.Now() + newTransport, err := transport.NewClientTransport(cc.target, &copts) + if err != nil { + cc.mu.Lock() + cc.state = TransientFailure + cc.stateCV.Broadcast() + cc.mu.Unlock() + sleepTime -= time.Since(connectTime) + if sleepTime < 0 { + sleepTime = 0 + } + // Fail early before falling into sleep. + if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { + cc.Close() + return ErrClientConnTimeout + } + closeTransport = false + time.Sleep(sleepTime) + retries++ + grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + continue + } + cc.mu.Lock() + if cc.state == Shutdown { + // cc.Close() has been invoked. 
+			cc.mu.Unlock()
+			newTransport.Close()
+			return ErrClientConnClosing
+		}
+		cc.state = Ready
+		cc.stateCV.Broadcast()
+		cc.transport = newTransport
+		cc.transportSeq = ts + 1
+		if cc.ready != nil {
+			close(cc.ready)
+			cc.ready = nil
+		}
+		cc.mu.Unlock()
+		return nil
+	}
+}
+
+// Run in a goroutine to track the error in transport and create the
+// new transport if an error happens. It returns when the channel is closing.
+func (cc *ClientConn) transportMonitor() {
+	for {
+		select {
+		// shutdownChan is needed to detect the teardown when
+		// the ClientConn is idle (i.e., no RPC in flight).
+		case <-cc.shutdownChan:
+			return
+		case <-cc.transport.Error():
+			cc.mu.Lock()
+			cc.state = TransientFailure
+			cc.stateCV.Broadcast()
+			cc.mu.Unlock()
+			if err := cc.resetTransport(true); err != nil {
+				// The ClientConn is closing.
+				grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err)
+				return
+			}
+			continue
+		}
+	}
+}
+
+// When wait returns, either the new transport is up or ClientConn is
+// closing. Used to avoid working on a dying transport. It updates and
+// returns the transport and its version when there is no error.
+func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
+	for {
+		cc.mu.Lock()
+		switch {
+		case cc.state == Shutdown:
+			cc.mu.Unlock()
+			return nil, 0, ErrClientConnClosing
+		case ts < cc.transportSeq:
+			// Worked on a dying transport. Try the new one immediately.
+			defer cc.mu.Unlock()
+			return cc.transport, cc.transportSeq, nil
+		default:
+			ready := cc.ready
+			if ready == nil {
+				ready = make(chan struct{})
+				cc.ready = ready
+			}
+			cc.mu.Unlock()
+			select {
+			case <-ctx.Done():
+				return nil, 0, transport.ContextErr(ctx.Err())
+			// Wait until the new transport is ready or failed.
+			case <-ready:
+			}
+		}
+	}
+}
+
+// Close starts to tear down the ClientConn. Returns ErrClientConnClosing if
+// it has been closed (mostly due to dial time-out).
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many ClientConn's in a
+// tight loop).
+func (cc *ClientConn) Close() error {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	if cc.state == Shutdown {
+		return ErrClientConnClosing
+	}
+	cc.state = Shutdown
+	cc.stateCV.Broadcast()
+	if cc.ready != nil {
+		close(cc.ready)
+		cc.ready = nil
+	}
+	if cc.transport != nil {
+		cc.transport.Close()
+	}
+	if cc.shutdownChan != nil {
+		close(cc.shutdownChan)
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh
new file mode 100644
index 000000000..b00948884
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from a .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not installed them, please do so first.
+#
+# We recommend running this script at $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes.
+#
+proto=$1
+protoc --go_out=plugins=grpc:.
$proto diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codes/code_string.go b/Godeps/_workspace/src/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..e6762d084 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Code; DO NOT EDIT + +package codes + +import "fmt" + +const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" + +var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} + +func (i Code) String() string { + if i+1 >= Code(len(_Code_index)) { + return fmt.Sprintf("Code(%d)", i) + } + return _Code_name[_Code_index[i]:_Code_index[i+1]] +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codes/codes.go b/Godeps/_workspace/src/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..37c5b860b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +//go:generate stringer -type=Code + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was cancelled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. 
+ Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. 
+ // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 +) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..d94fa2fbd --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,239 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "strings" + "time" + + "golang.org/x/net/context" +) + +var ( + // alpnProtoStr are the specified application level protocols for gRPC. 
+	alpnProtoStr = []string{"h2"}
+)
+
+// Credentials defines the common interface all supported credentials must
+// implement.
+type Credentials interface {
+	// GetRequestMetadata gets the current request metadata, refreshing
+	// tokens if required. This should be called by the transport layer on
+	// each request, and the data should be populated in headers or other
+	// context. uri is the URI of the entry point for the request. When
+	// supported by the underlying implementation, ctx can be used for
+	// timeout and cancellation.
+	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
+	// it as an arbitrary string.
+	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+	// RequireTransportSecurity indicates whether the credentials require
+	// transport security.
+	RequireTransportSecurity() bool
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, etc.
+type ProtocolInfo struct {
+	// ProtocolVersion is the gRPC wire protocol version.
+	ProtocolVersion string
+	// SecurityProtocol is the security protocol in use.
+	SecurityProtocol string
+	// SecurityVersion is the security protocol version.
+	SecurityVersion string
+}
+
+// AuthInfo defines the common interface for the auth information the users are interested in.
+type AuthInfo interface {
+	AuthType() string
+}
+
+type authInfoKey struct{}
+
+// NewContext creates a new context with authInfo attached.
+func NewContext(ctx context.Context, authInfo AuthInfo) context.Context {
+	return context.WithValue(ctx, authInfoKey{}, authInfo)
+}
+
+// FromContext returns the authInfo in ctx if it exists.
+func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) {
+	authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo)
+	return
+}
+
+// TransportAuthenticator defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportAuthenticator interface {
+	// ClientHandshake does the authentication handshake specified by the corresponding
+	// authentication protocol on rawConn for clients. It returns the authenticated
+	// connection and the corresponding auth information about the connection.
+	ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
+	// ServerHandshake does the authentication handshake for servers. It returns
+	// the authenticated connection and the corresponding auth information about
+	// the connection.
+	ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
+	// Info provides the ProtocolInfo of this TransportAuthenticator.
+	Info() ProtocolInfo
+	Credentials
+}
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+}
+
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+	}
+}
+
+// GetRequestMetadata returns nil, nil since TLS credentials do not have
+// metadata.
+func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return nil, nil +} + +func (c *tlsCreds) RequireTransportSecurity() bool { + return true +} + +type timeoutError struct{} + +func (timeoutError) Error() string { return "credentials: Dial timed out" } +func (timeoutError) Timeout() bool { return true } +func (timeoutError) Temporary() bool { return true } + +func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { + // borrow some code from tls.DialWithDialer + var errChannel chan error + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- timeoutError{} + }) + } + if c.config.ServerName == "" { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + c.config.ServerName = addr[:colonPos] + } + conn := tls.Client(rawConn, &c.config) + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + err = <-errChannel + } + if err != nil { + rawConn.Close() + return nil, nil, err + } + // TODO(zhaoq): Omit the auth info for client now. It is more for + // information than anything else. + return conn, nil, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, &c.config) + if err := conn.Handshake(); err != nil { + rawConn.Close() + return nil, nil, err + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +// NewTLS uses c to construct a TransportAuthenticator based on TLS. +func NewTLS(c *tls.Config) TransportAuthenticator { + tc := &tlsCreds{*c} + tc.config.NextProtos = alpnProtoStr + return tc +} + +// NewClientTLSFromCert constructs a TLS from the input certificate for client. +func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator { + return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs a TLS from the input certificate file for client. +func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs a TLS from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs a TLS from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go b/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 000000000..04943fdf0 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,177 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "fmt" + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies credentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +func NewJWTAccessFromKey(jsonKey []byte) (credentials.Credentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies credentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the credentials using a given token. 
+func NewOauthAccess(token *oauth2.Token) credentials.Credentials {
+	return oauthAccess{token: *token}
+}
+
+func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	return map[string]string{
+		"authorization": oa.token.TokenType + " " + oa.token.AccessToken,
+	}, nil
+}
+
+func (oa oauthAccess) RequireTransportSecurity() bool {
+	return true
+}
+
+// NewComputeEngine constructs the credentials that fetch access tokens from
+// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
+// if your program is running on a GCE instance.
+// TODO(dsymonds): Deprecate and remove this.
+func NewComputeEngine() credentials.Credentials {
+	return TokenSource{google.ComputeTokenSource("")}
+}
+
+// serviceAccount represents credentials via JWT signing key.
+type serviceAccount struct {
+	config *jwt.Config
+}
+
+func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	token, err := s.config.TokenSource(ctx).Token()
+	if err != nil {
+		return nil, err
+	}
+	return map[string]string{
+		"authorization": token.TokenType + " " + token.AccessToken,
+	}, nil
+}
+
+func (s serviceAccount) RequireTransportSecurity() bool {
+	return true
+}
+
+// NewServiceAccountFromKey constructs the credentials using the JSON key slice
+// from a Google Developers service account.
+func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Credentials, error) {
+	config, err := google.JWTConfigFromJSON(jsonKey, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return serviceAccount{config: config}, nil
+}
+
+// NewServiceAccountFromFile constructs the credentials using the JSON key file
+// of a Google Developers service account.
+func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Credentials, error) {
+	jsonKey, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
+	}
+	return NewServiceAccountFromKey(jsonKey, scope...)
+}
+
+// NewApplicationDefault returns "Application Default Credentials". For more
+// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
+func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.Credentials, error) {
+	t, err := google.DefaultTokenSource(ctx, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return TokenSource{t}, nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/doc.go b/Godeps/_workspace/src/google.golang.org/grpc/doc.go
new file mode 100644
index 000000000..c0f54f759
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/doc.go
@@ -0,0 +1,6 @@
+/*
+Package grpc implements an RPC system called gRPC.
+
+See https://github.com/grpc/grpc for more information about gRPC.
+*/
+package grpc
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md
new file mode 100644
index 000000000..db4e54209
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md
@@ -0,0 +1,53 @@
+gRPC in 3 minutes (Go)
+======================
+
+BACKGROUND
+-------------
+For this sample, we've already generated the server and client stubs from [helloworld.proto](examples/helloworld/proto/helloworld.proto).
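+
+For orientation, here is a minimal sketch of what calling the generated stub looks like from Go, assuming a `conn` obtained from `grpc.Dial` and the generated `pb` helloworld package (the complete program is in greeter_client/main.go):
+
+```go
+c := pb.NewGreeterClient(conn) // generated client stub
+r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: "world"})
+if err != nil {
+	log.Fatalf("could not greet: %v", err)
+}
+log.Printf("Greeting: %s", r.Message)
+```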
+
+PREREQUISITES
+-------------
+
+- This requires Go 1.4
+- Requires that [GOPATH is set](https://golang.org/doc/code.html#GOPATH)
+```sh
+$ go help gopath
+$ # ensure the PATH contains $GOPATH/bin
+$ export PATH=$PATH:$GOPATH/bin
+```
+
+INSTALL
+-------
+
+```sh
+$ go get -u github.com/grpc/grpc-go/examples/helloworld/greeter_client
+$ go get -u github.com/grpc/grpc-go/examples/helloworld/greeter_server
+```
+
+TRY IT!
+-------
+
+- Run the server
+```sh
+$ greeter_server &
+```
+
+- Run the client
+```sh
+$ greeter_client
+```
+
+OPTIONAL - Rebuilding the generated code
+----------------------------------------
+
+1. First [install protoc](https://github.com/google/protobuf/blob/master/INSTALL.txt)
+   - For now, this needs to be installed from source
+   - This will change once proto3 is officially released
+
+2. Install the protoc Go plugin.
+```sh
+$ go get -a github.com/golang/protobuf/protoc-gen-go
+$
+$ # from this dir; invoke protoc
+$ protoc -I ./helloworld/proto/ ./helloworld/proto/helloworld.proto --go_out=plugins=grpc:helloworld
+```
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md
new file mode 100644
index 000000000..8df15a4a4
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md
@@ -0,0 +1,431 @@
+# gRPC Basics: Go
+
+This tutorial provides a basic Go programmer's introduction to working with gRPC. By walking through this example you'll learn how to:
+
+- Define a service in a .proto file.
+- Generate server and client code using the protocol buffer compiler.
+- Use the Go gRPC API to write a simple client and server for your service.
+
+It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, which is currently in alpha release: you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers GitHub repository.
+
+This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon.
+
+## Why use gRPC?
+
+Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients.
+
+With gRPC we can define our service once in a .proto file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating.
+
+## Example code and setup
+
+The example code for our tutorial is in [grpc/grpc-go/examples/route_guide](https://github.com/grpc/grpc-go/tree/master/examples/route_guide). To download the example, clone the `grpc-go` repository by running the following command:
+```shell
+$ go get google.golang.org/grpc
+```
+
+Then change your current directory to `grpc-go/examples/route_guide`:
+```shell
+$ cd $GOPATH/src/google.golang.org/grpc/examples/route_guide
+```
+
+You should also have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Go quick start guide](examples/).
+
+
+## Defining the service
+
+Our first step (as you'll know from [Getting started](https://github.com/grpc/grpc/tree/master/examples)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). You can see the complete .proto file in [`examples/route_guide/proto/route_guide.proto`](examples/route_guide/proto/route_guide.proto).
+
+To define a service, you specify a named `service` in your .proto file:
+
+```proto
+service RouteGuide {
+   ...
+}
+```
+
+Then you define `rpc` methods inside your service definition, specifying their request and response types. gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service:
+
+- A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call.
+```proto
+  // Obtains the feature at a given position.
+  rpc GetFeature(Point) returns (Feature) {}
+```
+
+- A *server-side streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in our example, you specify a server-side streaming method by placing the `stream` keyword before the *response* type.
+```proto
+  // Obtains the Features available within the given Rectangle. Results are
+  // streamed rather than returned at once (e.g. in a response message with a
+  // repeated field), as the rectangle may cover a large area and contain a
+  // huge number of features.
+  rpc ListFeatures(Rectangle) returns (stream Feature) {}
+```
+
+- A *client-side streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a client-side streaming method by placing the `stream` keyword before the *request* type.
+```proto
+  // Accepts a stream of Points on a route being traversed, returning a
+  // RouteSummary when traversal is completed.
+  rpc RecordRoute(stream Point) returns (RouteSummary) {}
+```
+
+- A *bidirectional streaming RPC* where both sides send a sequence of messages using a read-write stream. The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response.
+```proto
+  // Accepts a stream of RouteNotes sent while a route is being traversed,
+  // while receiving other RouteNotes (e.g. from other users).
+ rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +``` + +Our .proto file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: +```proto +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} +``` + + +## Generating client and server code + +Next we need to generate the gRPC client and server interfaces from our .proto service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. + +For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first): + +```shell +$ codegen.sh route_guide.proto +``` + +which actually runs: + +```shell +$ protoc --go_out=plugins=grpc:. route_guide.proto +``` + +Running this command generates the following file in your current directory: +- `route_guide.pb.go` + +This contains: +- All the protocol buffer code to populate, serialize, and retrieve our request and response message types +- An interface type (or *stub*) for clients to call with the methods defined in the `RouteGuide` service. +- An interface type for servers to implement, also with the methods defined in the `RouteGuide` service. + + + +## Creating the server + +First let's look at how we create a `RouteGuide` server. If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!). + +There are two parts to making our `RouteGuide` service do its job: +- Implementing the service interface generated from our service definition: doing the actual "work" of our service. +- Running a gRPC server to listen for requests from clients and dispatch them to the right service implementation. + +You can find our example `RouteGuide` server in [grpc-go/examples/route_guide/server/server.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/server/server.go). Let's take a closer look at how it works. + +### Implementing RouteGuide + +As you can see, our server has a `routeGuideServer` struct type that implements the generated `RouteGuideServer` interface: + +```go +type routeGuideServer struct { + ... +} +... + +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + ... +} +... + +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + ... +} +... + +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + ... +} +... + +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + ... +} +... +``` + +#### Simple RPC +`routeGuideServer` implements all our service methods. Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`. 
+
+```go
+func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) {
+	for _, feature := range s.savedFeatures {
+		if proto.Equal(feature.Location, point) {
+			return feature, nil
+		}
+	}
+	// No feature was found, return an unnamed feature
+	return &pb.Feature{"", point}, nil
+}
+```
+
+The method is passed a context object for the RPC and the client's `Point` protocol buffer request. It returns a `Feature` protocol buffer object with the response information and an `error`. In the method we populate the `Feature` with the appropriate information, and then `return` it along with a `nil` error to tell gRPC that we've finished dealing with the RPC and that the `Feature` can be returned to the client.
+
+#### Server-side streaming RPC
+Now let's look at one of our streaming RPCs. `ListFeatures` is a server-side streaming RPC, so we need to send back multiple `Feature`s to our client.
+
+```go
+func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error {
+	for _, feature := range s.savedFeatures {
+		if inRange(feature.Location, rect) {
+			if err := stream.Send(feature); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+```
+
+As you can see, instead of getting simple request and response objects in our method parameters, this time we get a request object (the `Rectangle` in which our client wants to find `Feature`s) and a special `RouteGuide_ListFeaturesServer` object to write our responses.
+
+In the method, we populate as many `Feature` objects as we need to return, writing them to the `RouteGuide_ListFeaturesServer` using its `Send()` method. Finally, as in our simple RPC, we return a `nil` error to tell gRPC that we've finished writing responses. Should any error happen in this call, we return a non-`nil` error; the gRPC layer will translate it into an appropriate RPC status to be sent on the wire.
+
+#### Client-side streaming RPC
+Now let's look at something a little more complicated: the client-side streaming method `RecordRoute`, where we get a stream of `Point`s from the client and return a single `RouteSummary` with information about their trip. As you can see, this time the method doesn't have a request parameter at all. Instead, it gets a `RouteGuide_RecordRouteServer` stream, which the server can use to both read *and* write messages - it can receive client messages using its `Recv()` method and return its single response using its `SendAndClose()` method.
+
+```go
+func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error {
+	var pointCount, featureCount, distance int32
+	var lastPoint *pb.Point
+	startTime := time.Now()
+	for {
+		point, err := stream.Recv()
+		if err == io.EOF {
+			endTime := time.Now()
+			return stream.SendAndClose(&pb.RouteSummary{
+				PointCount:   pointCount,
+				FeatureCount: featureCount,
+				Distance:     distance,
+				ElapsedTime:  int32(endTime.Sub(startTime).Seconds()),
+			})
+		}
+		if err != nil {
+			return err
+		}
+		pointCount++
+		for _, feature := range s.savedFeatures {
+			if proto.Equal(feature.Location, point) {
+				featureCount++
+			}
+		}
+		if lastPoint != nil {
+			distance += calcDistance(lastPoint, point)
+		}
+		lastPoint = point
+	}
+}
+```
+
+In the method body we use the `RouteGuide_RecordRouteServer`'s `Recv()` method to repeatedly read in our client's requests to a request object (in this case a `Point`) until there are no more messages: the server needs to check the error returned from `Recv()` after each call. If this is `nil`, the stream is still good and it can continue reading; if it's `io.EOF`, the message stream has ended and the server can return its `RouteSummary`. If it has any other value, we return the error "as is" so that it'll be translated to an RPC status by the gRPC layer.
+
+#### Bidirectional streaming RPC
+Finally, let's look at our bidirectional streaming RPC `RouteChat()`.
+
+```go
+func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error {
+	for {
+		in, err := stream.Recv()
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		key := serialize(in.Location)
+		... // look for notes to be sent to client
+		for _, note := range s.routeNotes[key] {
+			if err := stream.Send(note); err != nil {
+				return err
+			}
+		}
+	}
+}
+```
+
+This time we get a `RouteGuide_RouteChatServer` stream that, as in our client-side streaming example, can be used to read and write messages. However, this time we return values via our method's stream while the client is still writing messages to *their* message stream.
+
+The syntax for reading and writing here is very similar to our client-streaming method, except the server uses the stream's `Send()` method rather than `SendAndClose()` because it's writing multiple responses. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently.
+
+### Starting the server
+
+Once we've implemented all our methods, we also need to start up a gRPC server so that clients can actually use our service. The following snippet shows how we do this for our `RouteGuide` service:
+
+```go
+flag.Parse()
+lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
+if err != nil {
+	log.Fatalf("failed to listen: %v", err)
+}
+grpcServer := grpc.NewServer()
+pb.RegisterRouteGuideServer(grpcServer, &routeGuideServer{})
+... // determine whether to use TLS
+grpcServer.Serve(lis)
+```
+To build and start a server, we:
+
+1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))`.
+2. Create an instance of the gRPC server using `grpc.NewServer()`.
+3. Register our service implementation with the gRPC server.
+4. Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called.
+
+
+## Creating the client
+
+In this section, we'll look at creating a Go client for our `RouteGuide` service. You can see our complete example client code in [grpc-go/examples/route_guide/client/client.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/client/client.go).
+
+### Creating a stub
+
+To call service methods, we first need to create a gRPC *channel* to communicate with the server. We create this by passing the server address and port number to `grpc.Dial()` as follows:
+
+```go
+conn, err := grpc.Dial(*serverAddr)
+if err != nil {
+	...
+}
+defer conn.Close()
+```
+
+You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service.
+
+Once the gRPC *channel* is set up, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our .proto.
+
+```go
+client := pb.NewRouteGuideClient(conn)
+```
+
+### Calling service methods
+
+Now let's look at how we call our service methods. Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode, which means that the RPC call waits for the server to respond, and will either return a response or an error.
+
+#### Simple RPC
+
+Calling the simple RPC `GetFeature` is nearly as straightforward as calling a local method.
+
+```go
+feature, err := client.GetFeature(context.Background(), &pb.Point{409146138, -746188906})
+if err != nil {
+	...
+}
+```
+
+As you can see, we call the method on the stub we got earlier. In our method parameters we create and populate a request protocol buffer object (in our case `Point`). We also pass a `context.Context` object which lets us change our RPC's behaviour if necessary, such as timing out or cancelling an RPC in flight. If the call doesn't return an error, then we can read the response information from the server from the first return value.
+
+```go
+log.Println(feature)
+```
+
+#### Server-side streaming RPC
+
+Here's where we call the server-side streaming method `ListFeatures`, which returns a stream of geographical `Feature`s. If you've already read [Creating the server](#server), some of this may look very familiar - streaming RPCs are implemented in a similar way on both sides.
+
+```go
+rect := &pb.Rectangle{ ... }  // initialize a pb.Rectangle
+stream, err := client.ListFeatures(context.Background(), rect)
+if err != nil {
+	...
+}
+for {
+	feature, err := stream.Recv()
+	if err == io.EOF {
+		break
+	}
+	if err != nil {
+		log.Fatalf("%v.ListFeatures(_) = _, %v", client, err)
+	}
+	log.Println(feature)
+}
+```
+
+As in the simple RPC, we pass the method a context and a request. However, instead of getting a response object back, we get back an instance of `RouteGuide_ListFeaturesClient`. The client can use the `RouteGuide_ListFeaturesClient` stream to read the server's responses.
+
+We use the `RouteGuide_ListFeaturesClient`'s `Recv()` method to repeatedly read in the server's responses to a response protocol buffer object (in this case a `Feature`) until there are no more messages: the client needs to check the error `err` returned from `Recv()` after each call. If `nil`, the stream is still good and it can continue reading; if it's `io.EOF` then the message stream has ended; otherwise there must be an RPC error, which is passed through `err`.
+
+#### Client-side streaming RPC
+
+The client-side streaming method `RecordRoute` is similar to the server-side method, except that we only pass the method a context and get a `RouteGuide_RecordRouteClient` stream back, which we can use to both write *and* read messages.
+ +```go +// Create a random number of random points +r := rand.New(rand.NewSource(time.Now().UnixNano())) +pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points +var points []*pb.Point +for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) +} +log.Printf("Traversing %d points.", len(points)) +stream, err := client.RecordRoute(context.Background()) +if err != nil { + log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) +} +for _, point := range points { + if err := stream.Send(point); err != nil { + log.Fatalf("%v.Send(%v) = %v", stream, point, err) + } +} +reply, err := stream.CloseAndRecv() +if err != nil { + log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) +} +log.Printf("Route summary: %v", reply) +``` + +The `RouteGuide_RecordRouteClient` has a `Send()` method that we can use to send requests to the server. Once we've finished writing our client's requests to the stream using `Send()`, we need to call `CloseAndRecv()` on the stream to let gRPC know that we've finished writing and are expecting to receive a response. We get our RPC status from the `err` returned from `CloseAndRecv()`. If the status is `nil`, then the first return value from `CloseAndRecv()` will be a valid server response. + +#### Bidirectional streaming RPC + +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. As in the case of `RecordRoute`, we only pass the method a context object and get back a stream that we can use to both write and read messages. However, this time we return values via our method's stream while the server is still writing messages to *their* message stream. + +```go +stream, err := client.RouteChat(context.Background()) +waitc := make(chan struct{}) +go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + log.Fatalf("Failed to receive a note : %v", err) + } + log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } +}() +for _, note := range notes { + if err := stream.Send(note); err != nil { + log.Fatalf("Failed to send a note: %v", err) + } +} +stream.CloseSend() +<-waitc +``` + +The syntax for reading and writing here is very similar to our client-side streaming method, except we use the stream's `CloseSend()` method once we've finished our call. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +## Try it out! + +To compile and run the server, assuming you are in the folder +`$GOPATH/src/google.golang.org/grpc/examples/route_guide`, simply: + +```sh +$ go run server/server.go +``` + +Likewise, to run the client: + +```sh +$ go run client/client.go +``` + diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go new file mode 100644 index 000000000..1e02ab154 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "log" + "os" + + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +const ( + address = "localhost:50051" + defaultName = "world" +) + +func main() { + // Set up a connection to the server. + conn, err := grpc.Dial(address, grpc.WithInsecure()) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + // Contact the server and print out its response. + name := defaultName + if len(os.Args) > 1 { + name = os.Args[1] + } + r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + log.Printf("Greeting: %s", r.Message) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go new file mode 100644 index 000000000..ba985df4c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package main
+
+import (
+	"log"
+	"net"
+
+	pb "google.golang.org/grpc/examples/helloworld/helloworld"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+const (
+	port = ":50051"
+)
+
+// server is used to implement helloworld.GreeterServer.
+type server struct{}
+
+// SayHello implements helloworld.GreeterServer
+func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
+	return &pb.HelloReply{Message: "Hello " + in.Name}, nil
+}
+
+func main() {
+	lis, err := net.Listen("tcp", port)
+	if err != nil {
+		log.Fatalf("failed to listen: %v", err)
+	}
+	s := grpc.NewServer()
+	pb.RegisterGreeterServer(s, &server{})
+	s.Serve(lis)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go
new file mode 100644
index 000000000..1ff931a38
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go.
+// source: helloworld.proto
+// DO NOT EDIT!
+
+/*
+Package helloworld is a generated protocol buffer package.
+
+It is generated from these files:
+	helloworld.proto
+
+It has these top-level messages:
+	HelloRequest
+	HelloReply
+*/
+package helloworld
+
+import proto "github.com/golang/protobuf/proto"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+// The request message containing the user's name.
+type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} + +// The response message containing the greetings +type HelloReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *HelloReply) Reset() { *m = HelloReply{} } +func (m *HelloReply) String() string { return proto.CompactTextString(m) } +func (*HelloReply) ProtoMessage() {} + +func init() { +} + +// Client API for Greeter service + +type GreeterClient interface { + // Sends a greeting + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterServer interface { + // Sends a greeting + SayHello(context.Context, *HelloRequest) (*HelloReply, error) +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(HelloRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(GreeterServer).SayHello(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto new file mode 100644 index 000000000..7d58870a7 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto @@ -0,0 +1,51 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+option java_package = "io.grpc.examples";
+option objc_class_prefix = "HLW";
+
+package helloworld;
+
+// The greeting service definition.
+service Greeter {
+  // Sends a greeting
+  rpc SayHello (HelloRequest) returns (HelloReply) {}
+}
+
+// The request message containing the user's name.
+message HelloRequest {
+  string name = 1;
+}
+
+// The response message containing the greetings
+message HelloReply {
+  string message = 1;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md
new file mode 100644
index 000000000..a7c0c2b0e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md
@@ -0,0 +1,35 @@
+# Description
+The route guide server and client demonstrate how to use the gRPC Go libraries to
+perform unary, client streaming, server streaming and full duplex RPCs.
+
+Please refer to [gRPC Basics: Go](http://www.grpc.io/docs/tutorials/basic/go.html) for more information.
+
+See the definition of the route guide service in proto/route_guide.proto.
+
+# Run the sample code
+To compile and run the server, assuming you are in the root of the route_guide
+folder, i.e., .../examples/route_guide/, simply:
+
+```sh
+$ go run server/server.go
+```
+
+Likewise, to run the client:
+
+```sh
+$ go run client/client.go
+```
+
+# Optional command line flags
+The server and client both take optional command line flags. For example, the
+client and server run without TLS by default. To enable TLS:
+
+```sh
+$ go run server/server.go -tls=true
+```
+
+and
+
+```sh
+$ go run client/client.go -tls=true
+```
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go
new file mode 100644
index 000000000..a96c0302c
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go
@@ -0,0 +1,202 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries
+// to perform unary, client streaming, server streaming and full duplex RPCs.
+//
+// It interacts with the route guide service whose definition can be found in proto/route_guide.proto.
+package main
+
+import (
+	"flag"
+	"io"
+	"math/rand"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	pb "google.golang.org/grpc/examples/route_guide/routeguide"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	tls                = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP")
+	caFile             = flag.String("ca_file", "testdata/ca.pem", "The file containing the CA root cert file")
+	serverAddr         = flag.String("server_addr", "127.0.0.1:10000", "The server address in the format of host:port")
+	serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name used to verify the hostname returned by the TLS handshake")
+)
+
+// printFeature gets the feature for the given point.
+func printFeature(client pb.RouteGuideClient, point *pb.Point) {
+	grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude)
+	feature, err := client.GetFeature(context.Background(), point)
+	if err != nil {
+		grpclog.Fatalf("%v.GetFeature(_) = _, %v: ", client, err)
+	}
+	grpclog.Println(feature)
+}
+
+// printFeatures lists all the features within the given bounding Rectangle.
+func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) {
+	grpclog.Printf("Looking for features within %v", rect)
+	stream, err := client.ListFeatures(context.Background(), rect)
+	if err != nil {
+		grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err)
+	}
+	for {
+		feature, err := stream.Recv()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err)
+		}
+		grpclog.Println(feature)
+	}
+}
+
+// runRecordRoute sends a sequence of points to the server and expects to get a RouteSummary from the server.
+func runRecordRoute(client pb.RouteGuideClient) { + // Create a random number of random points + r := rand.New(rand.NewSource(time.Now().UnixNano())) + pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points + var points []*pb.Point + for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) + } + grpclog.Printf("Traversing %d points.", len(points)) + stream, err := client.RecordRoute(context.Background()) + if err != nil { + grpclog.Fatalf("%v.RecordRoute(_) = _, %v", client, err) + } + for _, point := range points { + if err := stream.Send(point); err != nil { + grpclog.Fatalf("%v.Send(%v) = %v", stream, point, err) + } + } + reply, err := stream.CloseAndRecv() + if err != nil { + grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + } + grpclog.Printf("Route summary: %v", reply) +} + +// runRouteChat receives a sequence of route notes, while sending notes for various locations. +func runRouteChat(client pb.RouteGuideClient) { + notes := []*pb.RouteNote{ + {&pb.Point{0, 1}, "First message"}, + {&pb.Point{0, 2}, "Second message"}, + {&pb.Point{0, 3}, "Third message"}, + {&pb.Point{0, 1}, "Fourth message"}, + {&pb.Point{0, 2}, "Fifth message"}, + {&pb.Point{0, 3}, "Sixth message"}, + } + stream, err := client.RouteChat(context.Background()) + if err != nil { + grpclog.Fatalf("%v.RouteChat(_) = _, %v", client, err) + } + waitc := make(chan struct{}) + go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + grpclog.Fatalf("Failed to receive a note : %v", err) + } + grpclog.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } + }() + for _, note := range notes { + if err := stream.Send(note); err != nil { + grpclog.Fatalf("Failed to send a note: %v", err) + } + } + stream.CloseSend() + <-waitc +} + +func randomPoint(r *rand.Rand) *pb.Point { + lat := (r.Int31n(180) - 90) * 1e7 + long := (r.Int31n(360) - 180) * 1e7 + return &pb.Point{lat, long} +} + +func main() { + flag.Parse() + var opts []grpc.DialOption + if *tls { + var sn string + if *serverHostOverride != "" { + sn = *serverHostOverride + } + var creds credentials.TransportAuthenticator + if *caFile != "" { + var err error + creds, err = credentials.NewClientTLSFromFile(*caFile, sn) + if err != nil { + grpclog.Fatalf("Failed to create TLS credentials %v", err) + } + } else { + creds = credentials.NewClientTLSFromCert(nil, sn) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + conn, err := grpc.Dial(*serverAddr, opts...) + if err != nil { + grpclog.Fatalf("fail to dial: %v", err) + } + defer conn.Close() + client := pb.NewRouteGuideClient(conn) + + // Looking for a valid feature + printFeature(client, &pb.Point{409146138, -746188906}) + + // Feature missing. + printFeature(client, &pb.Point{0, 0}) + + // Looking for features between 40, -75 and 42, -73. 
+ printFeatures(client, &pb.Rectangle{&pb.Point{400000000, -750000000}, &pb.Point{420000000, -730000000}}) + + // RecordRoute + runRecordRoute(client) + + // RouteChat + runRouteChat(client) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go new file mode 100644 index 000000000..fcf5c7484 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go @@ -0,0 +1,425 @@ +// Code generated by protoc-gen-go. +// source: route_guide.proto +// DO NOT EDIT! + +/* +Package proto is a generated protocol buffer package. + +It is generated from these files: + route_guide.proto + +It has these top-level messages: + Point + Rectangle + Feature + RouteNote + RouteSummary +*/ +package routeguide + +import proto1 "github.com/golang/protobuf/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto1.Marshal + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +type Point struct { + Latitude int32 `protobuf:"varint,1,opt,name=latitude" json:"latitude,omitempty"` + Longitude int32 `protobuf:"varint,2,opt,name=longitude" json:"longitude,omitempty"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto1.CompactTextString(m) } +func (*Point) ProtoMessage() {} + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +type Rectangle struct { + // One corner of the rectangle. + Lo *Point `protobuf:"bytes,1,opt,name=lo" json:"lo,omitempty"` + // The other corner of the rectangle. + Hi *Point `protobuf:"bytes,2,opt,name=hi" json:"hi,omitempty"` +} + +func (m *Rectangle) Reset() { *m = Rectangle{} } +func (m *Rectangle) String() string { return proto1.CompactTextString(m) } +func (*Rectangle) ProtoMessage() {} + +func (m *Rectangle) GetLo() *Point { + if m != nil { + return m.Lo + } + return nil +} + +func (m *Rectangle) GetHi() *Point { + if m != nil { + return m.Hi + } + return nil +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +type Feature struct { + // The name of the feature. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The point where the feature is detected. + Location *Point `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (m *Feature) String() string { return proto1.CompactTextString(m) } +func (*Feature) ProtoMessage() {} + +func (m *Feature) GetLocation() *Point { + if m != nil { + return m.Location + } + return nil +} + +// A RouteNote is a message sent while at a given point. +type RouteNote struct { + // The location from which the message is sent. + Location *Point `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` + // The message to be sent. 
+ Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *RouteNote) Reset() { *m = RouteNote{} } +func (m *RouteNote) String() string { return proto1.CompactTextString(m) } +func (*RouteNote) ProtoMessage() {} + +func (m *RouteNote) GetLocation() *Point { + if m != nil { + return m.Location + } + return nil +} + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +type RouteSummary struct { + // The number of points received. + PointCount int32 `protobuf:"varint,1,opt,name=point_count" json:"point_count,omitempty"` + // The number of known features passed while traversing the route. + FeatureCount int32 `protobuf:"varint,2,opt,name=feature_count" json:"feature_count,omitempty"` + // The distance covered in metres. + Distance int32 `protobuf:"varint,3,opt,name=distance" json:"distance,omitempty"` + // The duration of the traversal in seconds. + ElapsedTime int32 `protobuf:"varint,4,opt,name=elapsed_time" json:"elapsed_time,omitempty"` +} + +func (m *RouteSummary) Reset() { *m = RouteSummary{} } +func (m *RouteSummary) String() string { return proto1.CompactTextString(m) } +func (*RouteSummary) ProtoMessage() {} + +func init() { +} + +// Client API for RouteGuide service + +type RouteGuideClient interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // If no feature is found for the given point, a feature with an empty name + // should be returned. + GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) +} + +type routeGuideClient struct { + cc *grpc.ClientConn +} + +func NewRouteGuideClient(cc *grpc.ClientConn) RouteGuideClient { + return &routeGuideClient{cc} +} + +func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { + out := new(Feature) + err := grpc.Invoke(ctx, "/proto.RouteGuide/GetFeature", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[0], c.cc, "/proto.RouteGuide/ListFeatures", opts...) 
+ if err != nil { + return nil, err + } + x := &routeGuideListFeaturesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RouteGuide_ListFeaturesClient interface { + Recv() (*Feature, error) + grpc.ClientStream +} + +type routeGuideListFeaturesClient struct { + grpc.ClientStream +} + +func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { + m := new(Feature) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[1], c.cc, "/proto.RouteGuide/RecordRoute", opts...) + if err != nil { + return nil, err + } + x := &routeGuideRecordRouteClient{stream} + return x, nil +} + +type RouteGuide_RecordRouteClient interface { + Send(*Point) error + CloseAndRecv() (*RouteSummary, error) + grpc.ClientStream +} + +type routeGuideRecordRouteClient struct { + grpc.ClientStream +} + +func (x *routeGuideRecordRouteClient) Send(m *Point) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(RouteSummary) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[2], c.cc, "/proto.RouteGuide/RouteChat", opts...) + if err != nil { + return nil, err + } + x := &routeGuideRouteChatClient{stream} + return x, nil +} + +type RouteGuide_RouteChatClient interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ClientStream +} + +type routeGuideRouteChatClient struct { + grpc.ClientStream +} + +func (x *routeGuideRouteChatClient) Send(m *RouteNote) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for RouteGuide service + +type RouteGuideServer interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // If no feature is found for the given point, a feature with an empty name + // should be returned. + GetFeature(context.Context, *Point) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(RouteGuide_RecordRouteServer) error + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). 
+ RouteChat(RouteGuide_RouteChatServer) error +} + +func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { + s.RegisterService(&_RouteGuide_serviceDesc, srv) +} + +func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(Point) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(RouteGuideServer).GetFeature(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Rectangle) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream}) +} + +type RouteGuide_ListFeaturesServer interface { + Send(*Feature) error + grpc.ServerStream +} + +type routeGuideListFeaturesServer struct { + grpc.ServerStream +} + +func (x *routeGuideListFeaturesServer) Send(m *Feature) error { + return x.ServerStream.SendMsg(m) +} + +func _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream}) +} + +type RouteGuide_RecordRouteServer interface { + SendAndClose(*RouteSummary) error + Recv() (*Point, error) + grpc.ServerStream +} + +type routeGuideRecordRouteServer struct { + grpc.ServerStream +} + +func (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteServer) Recv() (*Point, error) { + m := new(Point) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream}) +} + +type RouteGuide_RouteChatServer interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ServerStream +} + +type routeGuideRouteChatServer struct { + grpc.ServerStream +} + +func (x *routeGuideRouteChatServer) Send(m *RouteNote) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _RouteGuide_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.RouteGuide", + HandlerType: (*RouteGuideServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetFeature", + Handler: _RouteGuide_GetFeature_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListFeatures", + Handler: _RouteGuide_ListFeatures_Handler, + ServerStreams: true, + }, + { + StreamName: "RecordRoute", + Handler: _RouteGuide_RecordRoute_Handler, + ClientStreams: true, + }, + { + StreamName: "RouteChat", + Handler: _RouteGuide_RouteChat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto new file mode 100644 index 000000000..bee7ac51a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto @@ -0,0 +1,121 @@ +// Copyright 2015, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package routeguide; + +// Interface exported by the server. +service RouteGuide { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // If no feature is found for the given point, a feature with an empty name + // should be returned. + rpc GetFeature(Point) returns (Feature) {} + + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} + + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} + + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +} + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +message Rectangle { + // One corner of the rectangle. + Point lo = 1; + + // The other corner of the rectangle. + Point hi = 2; +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +message Feature { + // The name of the feature. + string name = 1; + + // The point where the feature is detected. 
+  Point location = 2;
+}
+
+// A RouteNote is a message sent while at a given point.
+message RouteNote {
+  // The location from which the message is sent.
+  Point location = 1;
+
+  // The message to be sent.
+  string message = 2;
+}
+
+// A RouteSummary is received in response to a RecordRoute rpc.
+//
+// It contains the number of individual points received, the number of
+// detected features, and the total distance covered as the cumulative sum of
+// the distance between each point.
+message RouteSummary {
+  // The number of points received.
+  int32 point_count = 1;
+
+  // The number of known features passed while traversing the route.
+  int32 feature_count = 2;
+
+  // The distance covered in metres.
+  int32 distance = 3;
+
+  // The duration of the traversal in seconds.
+  int32 elapsed_time = 4;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go
new file mode 100644
index 000000000..09b3942d1
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go
@@ -0,0 +1,239 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries
+// to perform unary, client streaming, server streaming and full duplex RPCs.
+//
+// It implements the route guide service whose definition can be found in routeguide/route_guide.proto.
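+//
+// A typical invocation (using the flag names defined below; the paths are the
+// defaults shipped with this example, adjust as needed) might be:
+//
+//	go run server.go -port 10000 -json_db_file testdata/route_guide_db.json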
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"net"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+
+	"github.com/golang/protobuf/proto"
+
+	pb "google.golang.org/grpc/examples/route_guide/routeguide"
+)
+
+var (
+	tls        = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP")
+	certFile   = flag.String("cert_file", "testdata/server1.pem", "The TLS cert file")
+	keyFile    = flag.String("key_file", "testdata/server1.key", "The TLS key file")
+	jsonDBFile = flag.String("json_db_file", "testdata/route_guide_db.json", "A json file containing a list of features")
+	port       = flag.Int("port", 10000, "The server port")
+)
+
+type routeGuideServer struct {
+	savedFeatures []*pb.Feature
+	routeNotes    map[string][]*pb.RouteNote
+}
+
+// GetFeature returns the feature at the given point.
+func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) {
+	for _, feature := range s.savedFeatures {
+		if proto.Equal(feature.Location, point) {
+			return feature, nil
+		}
+	}
+	// No feature was found, return an unnamed feature
+	return &pb.Feature{"", point}, nil
+}
+
+// ListFeatures lists all features contained within the given bounding Rectangle.
+func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error {
+	for _, feature := range s.savedFeatures {
+		if inRange(feature.Location, rect) {
+			if err := stream.Send(feature); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// RecordRoute records a route composed of a sequence of points.
+//
+// It gets a stream of points, and responds with statistics about the "trip":
+// number of points, number of known features visited, total distance traveled, and
+// total time spent.
+func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error {
+	var pointCount, featureCount, distance int32
+	var lastPoint *pb.Point
+	startTime := time.Now()
+	for {
+		point, err := stream.Recv()
+		if err == io.EOF {
+			endTime := time.Now()
+			return stream.SendAndClose(&pb.RouteSummary{
+				PointCount:   pointCount,
+				FeatureCount: featureCount,
+				Distance:     distance,
+				ElapsedTime:  int32(endTime.Sub(startTime).Seconds()),
+			})
+		}
+		if err != nil {
+			return err
+		}
+		pointCount++
+		for _, feature := range s.savedFeatures {
+			if proto.Equal(feature.Location, point) {
+				featureCount++
+			}
+		}
+		if lastPoint != nil {
+			distance += calcDistance(lastPoint, point)
+		}
+		lastPoint = point
+	}
+}
+
+// RouteChat receives a stream of message/location pairs, and responds with a stream of all
+// previous messages at each of those locations.
+func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error {
+	for {
+		in, err := stream.Recv()
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		key := serialize(in.Location)
+		if _, present := s.routeNotes[key]; !present {
+			s.routeNotes[key] = []*pb.RouteNote{in}
+		} else {
+			s.routeNotes[key] = append(s.routeNotes[key], in)
+		}
+		for _, note := range s.routeNotes[key] {
+			if err := stream.Send(note); err != nil {
+				return err
+			}
+		}
+	}
+}
+
+// loadFeatures loads features from a JSON file.
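+// The file is expected to contain a JSON array of features in the same shape
+// as testdata/route_guide_db.json, for example:
+//
+//	[{"location": {"latitude": 407838351, "longitude": -746143763},
+//	  "name": "Patriots Path, Mendham, NJ 07945, USA"}]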
+func (s *routeGuideServer) loadFeatures(filePath string) { + file, err := ioutil.ReadFile(filePath) + if err != nil { + grpclog.Fatalf("Failed to load default features: %v", err) + } + if err := json.Unmarshal(file, &s.savedFeatures); err != nil { + grpclog.Fatalf("Failed to load default features: %v", err) + } +} + +func toRadians(num float64) float64 { + return num * math.Pi / float64(180) +} + +// calcDistance calculates the distance between two points using the "haversine" formula. +// This code was taken from http://www.movable-type.co.uk/scripts/latlong.html. +func calcDistance(p1 *pb.Point, p2 *pb.Point) int32 { + const CordFactor float64 = 1e7 + const R float64 = float64(6371000) // metres + lat1 := float64(p1.Latitude) / CordFactor + lat2 := float64(p2.Latitude) / CordFactor + lng1 := float64(p1.Longitude) / CordFactor + lng2 := float64(p2.Longitude) / CordFactor + φ1 := toRadians(lat1) + φ2 := toRadians(lat2) + Δφ := toRadians(lat2 - lat1) + Δλ := toRadians(lng2 - lng1) + + a := math.Sin(Δφ/2)*math.Sin(Δφ/2) + + math.Cos(φ1)*math.Cos(φ2)* + math.Sin(Δλ/2)*math.Sin(Δλ/2) + c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) + + distance := R * c + return int32(distance) +} + +func inRange(point *pb.Point, rect *pb.Rectangle) bool { + left := math.Min(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) + right := math.Max(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) + top := math.Max(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) + bottom := math.Min(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) + + if float64(point.Longitude) >= left && + float64(point.Longitude) <= right && + float64(point.Latitude) >= bottom && + float64(point.Latitude) <= top { + return true + } + return false +} + +func serialize(point *pb.Point) string { + return fmt.Sprintf("%d %d", point.Latitude, point.Longitude) +} + +func newServer() *routeGuideServer { + s := new(routeGuideServer) + s.loadFeatures(*jsonDBFile) + s.routeNotes = make(map[string][]*pb.RouteNote) + return s +} + +func main() { + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + var opts []grpc.ServerOption + if *tls { + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) + if err != nil { + grpclog.Fatalf("Failed to generate credentials %v", err) + } + opts = []grpc.ServerOption{grpc.Creds(creds)} + } + grpcServer := grpc.NewServer(opts...) 
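+	// Register the RouteGuide service implementation with the gRPC server,
+	// then serve connections on the listener until the process exits.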
+ pb.RegisterRouteGuideServer(grpcServer, newServer()) + grpcServer.Serve(lis) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/ca.pem new file mode 100644 index 000000000..999da8977 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/ca.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw +NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh +MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 +Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 +cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv +xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ +QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ +AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz +fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y +5ukK//dCPAzA11YuX2rnex0JhuTQfcI= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json new file mode 100644 index 000000000..9d6a980ab --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json @@ -0,0 +1,601 @@ +[{ + "location": { + "latitude": 407838351, + "longitude": -746143763 + }, + "name": "Patriots Path, Mendham, NJ 07945, USA" +}, { + "location": { + "latitude": 408122808, + "longitude": -743999179 + }, + "name": "101 New Jersey 10, Whippany, NJ 07981, USA" +}, { + "location": { + "latitude": 413628156, + "longitude": -749015468 + }, + "name": "U.S. 
6, Shohola, PA 18458, USA" +}, { + "location": { + "latitude": 419999544, + "longitude": -740371136 + }, + "name": "5 Conners Road, Kingston, NY 12401, USA" +}, { + "location": { + "latitude": 414008389, + "longitude": -743951297 + }, + "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" +}, { + "location": { + "latitude": 419611318, + "longitude": -746524769 + }, + "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" +}, { + "location": { + "latitude": 406109563, + "longitude": -742186778 + }, + "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" +}, { + "location": { + "latitude": 416802456, + "longitude": -742370183 + }, + "name": "352 South Mountain Road, Wallkill, NY 12589, USA" +}, { + "location": { + "latitude": 412950425, + "longitude": -741077389 + }, + "name": "Bailey Turn Road, Harriman, NY 10926, USA" +}, { + "location": { + "latitude": 412144655, + "longitude": -743949739 + }, + "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" +}, { + "location": { + "latitude": 415736605, + "longitude": -742847522 + }, + "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" +}, { + "location": { + "latitude": 413843930, + "longitude": -740501726 + }, + "name": "162 Merrill Road, Highland Mills, NY 10930, USA" +}, { + "location": { + "latitude": 410873075, + "longitude": -744459023 + }, + "name": "Clinton Road, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 412346009, + "longitude": -744026814 + }, + "name": "16 Old Brook Lane, Warwick, NY 10990, USA" +}, { + "location": { + "latitude": 402948455, + "longitude": -747903913 + }, + "name": "3 Drake Lane, Pennington, NJ 08534, USA" +}, { + "location": { + "latitude": 406337092, + "longitude": -740122226 + }, + "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" +}, { + "location": { + "latitude": 406421967, + "longitude": -747727624 + }, + "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" +}, { + "location": { + "latitude": 416318082, + "longitude": -749677716 + }, + "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" +}, { + "location": { + "latitude": 415301720, + "longitude": -748416257 + }, + "name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA" +}, { + "location": { + "latitude": 402647019, + "longitude": -747071791 + }, + "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" +}, { + "location": { + "latitude": 412567807, + "longitude": -741058078 + }, + "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" +}, { + "location": { + "latitude": 416855156, + "longitude": -744420597 + }, + "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" +}, { + "location": { + "latitude": 404663628, + "longitude": -744820157 + }, + "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" +}, { + "location": { + "latitude": 407113723, + "longitude": -749746483 + }, + "name": "" +}, { + "location": { + "latitude": 402133926, + "longitude": -743613249 + }, + "name": "" +}, { + "location": { + "latitude": 400273442, + "longitude": -741220915 + }, + "name": "" +}, { + "location": { + "latitude": 411236786, + "longitude": -744070769 + }, + "name": "" +}, { + "location": { + "latitude": 411633782, + "longitude": -746784970 + }, + "name": "211-225 Plains Road, Augusta, NJ 07822, USA" +}, { + "location": { + "latitude": 415830701, + "longitude": -742952812 + }, + "name": "" +}, { + "location": { + "latitude": 413447164, + "longitude": -748712898 + }, + "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" +}, { + "location": { + 
"latitude": 405047245, + "longitude": -749800722 + }, + "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" +}, { + "location": { + "latitude": 418858923, + "longitude": -746156790 + }, + "name": "" +}, { + "location": { + "latitude": 417951888, + "longitude": -748484944 + }, + "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" +}, { + "location": { + "latitude": 407033786, + "longitude": -743977337 + }, + "name": "26 East 3rd Street, New Providence, NJ 07974, USA" +}, { + "location": { + "latitude": 417548014, + "longitude": -740075041 + }, + "name": "" +}, { + "location": { + "latitude": 410395868, + "longitude": -744972325 + }, + "name": "" +}, { + "location": { + "latitude": 404615353, + "longitude": -745129803 + }, + "name": "" +}, { + "location": { + "latitude": 406589790, + "longitude": -743560121 + }, + "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" +}, { + "location": { + "latitude": 414653148, + "longitude": -740477477 + }, + "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" +}, { + "location": { + "latitude": 405957808, + "longitude": -743255336 + }, + "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" +}, { + "location": { + "latitude": 411733589, + "longitude": -741648093 + }, + "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" +}, { + "location": { + "latitude": 412676291, + "longitude": -742606606 + }, + "name": "1270 Lakes Road, Monroe, NY 10950, USA" +}, { + "location": { + "latitude": 409224445, + "longitude": -748286738 + }, + "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" +}, { + "location": { + "latitude": 406523420, + "longitude": -742135517 + }, + "name": "652 Garden Street, Elizabeth, NJ 07202, USA" +}, { + "location": { + "latitude": 401827388, + "longitude": -740294537 + }, + "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" +}, { + "location": { + "latitude": 410564152, + "longitude": -743685054 + }, + "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 408472324, + "longitude": -740726046 + }, + "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" +}, { + "location": { + "latitude": 412452168, + "longitude": -740214052 + }, + "name": "5 White Oak Lane, Stony Point, NY 10980, USA" +}, { + "location": { + "latitude": 409146138, + "longitude": -746188906 + }, + "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" +}, { + "location": { + "latitude": 404701380, + "longitude": -744781745 + }, + "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 409642566, + "longitude": -746017679 + }, + "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" +}, { + "location": { + "latitude": 408031728, + "longitude": -748645385 + }, + "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" +}, { + "location": { + "latitude": 413700272, + "longitude": -742135189 + }, + "name": "367 Prospect Road, Chester, NY 10918, USA" +}, { + "location": { + "latitude": 404310607, + "longitude": -740282632 + }, + "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" +}, { + "location": { + "latitude": 409319800, + "longitude": -746201391 + }, + "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" +}, { + "location": { + "latitude": 406685311, + "longitude": -742108603 + }, + "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" +}, { + "location": { + "latitude": 419018117, + "longitude": -749142781 + }, + "name": "43 Dreher Road, Roscoe, NY 12776, USA" +}, { + "location": { + 
"latitude": 412856162, + "longitude": -745148837 + }, + "name": "Swan Street, Pine Island, NY 10969, USA" +}, { + "location": { + "latitude": 416560744, + "longitude": -746721964 + }, + "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" +}, { + "location": { + "latitude": 405314270, + "longitude": -749836354 + }, + "name": "" +}, { + "location": { + "latitude": 414219548, + "longitude": -743327440 + }, + "name": "" +}, { + "location": { + "latitude": 415534177, + "longitude": -742900616 + }, + "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" +}, { + "location": { + "latitude": 406898530, + "longitude": -749127080 + }, + "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" +}, { + "location": { + "latitude": 407586880, + "longitude": -741670168 + }, + "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" +}, { + "location": { + "latitude": 400106455, + "longitude": -742870190 + }, + "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" +}, { + "location": { + "latitude": 400066188, + "longitude": -746793294 + }, + "name": "" +}, { + "location": { + "latitude": 418803880, + "longitude": -744102673 + }, + "name": "40 Mountain Road, Napanoch, NY 12458, USA" +}, { + "location": { + "latitude": 414204288, + "longitude": -747895140 + }, + "name": "" +}, { + "location": { + "latitude": 414777405, + "longitude": -740615601 + }, + "name": "" +}, { + "location": { + "latitude": 415464475, + "longitude": -747175374 + }, + "name": "48 North Road, Forestburgh, NY 12777, USA" +}, { + "location": { + "latitude": 404062378, + "longitude": -746376177 + }, + "name": "" +}, { + "location": { + "latitude": 405688272, + "longitude": -749285130 + }, + "name": "" +}, { + "location": { + "latitude": 400342070, + "longitude": -748788996 + }, + "name": "" +}, { + "location": { + "latitude": 401809022, + "longitude": -744157964 + }, + "name": "" +}, { + "location": { + "latitude": 404226644, + "longitude": -740517141 + }, + "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" +}, { + "location": { + "latitude": 410322033, + "longitude": -747871659 + }, + "name": "" +}, { + "location": { + "latitude": 407100674, + "longitude": -747742727 + }, + "name": "" +}, { + "location": { + "latitude": 418811433, + "longitude": -741718005 + }, + "name": "213 Bush Road, Stone Ridge, NY 12484, USA" +}, { + "location": { + "latitude": 415034302, + "longitude": -743850945 + }, + "name": "" +}, { + "location": { + "latitude": 411349992, + "longitude": -743694161 + }, + "name": "" +}, { + "location": { + "latitude": 404839914, + "longitude": -744759616 + }, + "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 414638017, + "longitude": -745957854 + }, + "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" +}, { + "location": { + "latitude": 412127800, + "longitude": -740173578 + }, + "name": "" +}, { + "location": { + "latitude": 401263460, + "longitude": -747964303 + }, + "name": "" +}, { + "location": { + "latitude": 412843391, + "longitude": -749086026 + }, + "name": "" +}, { + "location": { + "latitude": 418512773, + "longitude": -743067823 + }, + "name": "" +}, { + "location": { + "latitude": 404318328, + "longitude": -740835638 + }, + "name": "42-102 Main Street, Belford, NJ 07718, USA" +}, { + "location": { + "latitude": 419020746, + "longitude": -741172328 + }, + "name": "" +}, { + "location": { + "latitude": 404080723, + "longitude": -746119569 + }, + "name": "" +}, { + "location": { + "latitude": 401012643, + "longitude": 
-744035134 + }, + "name": "" +}, { + "location": { + "latitude": 404306372, + "longitude": -741079661 + }, + "name": "" +}, { + "location": { + "latitude": 403966326, + "longitude": -748519297 + }, + "name": "" +}, { + "location": { + "latitude": 405002031, + "longitude": -748407866 + }, + "name": "" +}, { + "location": { + "latitude": 409532885, + "longitude": -742200683 + }, + "name": "" +}, { + "location": { + "latitude": 416851321, + "longitude": -742674555 + }, + "name": "" +}, { + "location": { + "latitude": 406411633, + "longitude": -741722051 + }, + "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" +}, { + "location": { + "latitude": 413069058, + "longitude": -744597778 + }, + "name": "261 Van Sickle Road, Goshen, NY 10924, USA" +}, { + "location": { + "latitude": 418465462, + "longitude": -746859398 + }, + "name": "" +}, { + "location": { + "latitude": 411733222, + "longitude": -744228360 + }, + "name": "" +}, { + "location": { + "latitude": 410248224, + "longitude": -747127767 + }, + "name": "3 Hasta Way, Newton, NJ 07860, USA" +}] diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.key new file mode 100644 index 000000000..ed00cd500 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ +bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM +BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB +AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I +ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg +omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb +ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB +9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd +MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v +IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc +USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo +VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a +RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.pem new file mode 100644 index 000000000..8e582e571 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 +MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl +c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs +JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO +RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 +3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL +BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 +b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ +KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS 
+wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e
+aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s=
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md b/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md
new file mode 100644
index 000000000..f80cbbdb2
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md
@@ -0,0 +1,41 @@
+# Authentication
+
+As outlined here, gRPC supports a number of different mechanisms for asserting identity between a client and a server. We'll present some code samples here demonstrating how to use TLS for encryption and identity assertions, as well as how to pass OAuth2 tokens to services that support it.
+
+# Enabling TLS on a gRPC client
+
+```Go
+conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
+```
+
+# Enabling TLS on a gRPC server
+
+```Go
+creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
+if err != nil {
+  log.Fatalf("Failed to generate credentials %v", err)
+}
+lis, err := net.Listen("tcp", ":0")
+server := grpc.NewServer(grpc.Creds(creds))
+...
+server.Serve(lis)
+```
+
+# Authenticating with Google
+
+## Google Compute Engine (GCE)
+
+```Go
+conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(oauth.NewComputeEngine()))
+```
+
+## JWT
+
+```Go
+jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)
+if err != nil {
+  log.Fatalf("Failed to create JWT credentials: %v", err)
+}
+conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(jwtCreds))
+```
+
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go
new file mode 100644
index 000000000..53e3c539f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package glogger defines glog-based logging for grpc. +*/ +package glogger + +import ( + "github.com/golang/glog" + "google.golang.org/grpc/grpclog" +) + +func init() { + grpclog.SetLogger(&glogger{}) +} + +type glogger struct{} + +func (g *glogger) Fatal(args ...interface{}) { + glog.Fatal(args...) +} + +func (g *glogger) Fatalf(format string, args ...interface{}) { + glog.Fatalf(format, args...) +} + +func (g *glogger) Fatalln(args ...interface{}) { + glog.Fatalln(args...) +} + +func (g *glogger) Print(args ...interface{}) { + glog.Info(args...) +} + +func (g *glogger) Printf(format string, args ...interface{}) { + glog.Infof(format, args...) +} + +func (g *glogger) Println(args ...interface{}) { + glog.Infoln(args...) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 000000000..cc6e27c06 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package grpclog defines logging for grpc. +*/ +package grpclog + +import ( + "log" + "os" +) + +// Use golang's standard logger by default. +var logger Logger = log.New(os.Stderr, "", log.LstdFlags) + +// Logger mimics golang's standard Logger as an interface. 
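+//
+// Any implementation of these methods can be installed via SetLogger; the
+// glogger package above, for instance, adapts github.com/golang/glog to this
+// interface.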
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc.
+func SetLogger(l Logger) {
+	logger = l
+}
+
+// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
+func Fatal(args ...interface{}) {
+	logger.Fatal(args...)
+}
+
+// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalf(format string, args ...interface{}) {
+	logger.Fatalf(format, args...)
+}
+
+// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalln(args ...interface{}) {
+	logger.Fatalln(args...)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+func Print(args ...interface{}) {
+	logger.Print(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+func Printf(format string, args ...interface{}) {
+	logger.Printf(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+func Println(args ...interface{}) {
+	logger.Println(args...)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go
new file mode 100644
index 000000000..6bfbe4973
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go
@@ -0,0 +1,129 @@
+// Code generated by protoc-gen-go.
+// source: health.proto
+// DO NOT EDIT!
+
+/*
+Package grpc_health_v1alpha is a generated protocol buffer package.
+
+It is generated from these files:
+	health.proto
+
+It has these top-level messages:
+	HealthCheckRequest
+	HealthCheckResponse
+*/
+package grpc_health_v1alpha
+
+import proto "github.com/golang/protobuf/proto"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,2,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1alpha.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} + +func init() { + proto.RegisterEnum("grpc.health.v1alpha.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Client API for HealthCheck service + +type HealthCheckClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthCheckClient struct { + cc *grpc.ClientConn +} + +func NewHealthCheckClient(cc *grpc.ClientConn) HealthCheckClient { + return &healthCheckClient{cc} +} + +func (c *healthCheckClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1alpha.HealthCheck/Check", in, out, c.cc, opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for HealthCheck service
+
+type HealthCheckServer interface {
+	Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+}
+
+func RegisterHealthCheckServer(s *grpc.Server, srv HealthCheckServer) {
+	s.RegisterService(&_HealthCheck_serviceDesc, srv)
+}
+
+func _HealthCheck_Check_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
+	in := new(HealthCheckRequest)
+	if err := codec.Unmarshal(buf, in); err != nil {
+		return nil, err
+	}
+	out, err := srv.(HealthCheckServer).Check(ctx, in)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+var _HealthCheck_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.health.v1alpha.HealthCheck",
+	HandlerType: (*HealthCheckServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Check",
+			Handler:    _HealthCheck_Check_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{},
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto
new file mode 100644
index 000000000..1ca5bbc16
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package grpc.health.v1alpha;
+
+message HealthCheckRequest {
+  string service = 2;
+}
+
+message HealthCheckResponse {
+  enum ServingStatus {
+    UNKNOWN = 0;
+    SERVING = 1;
+    NOT_SERVING = 2;
+  }
+  ServingStatus status = 1;
+}
+
+service HealthCheck{
+  rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/health.go b/Godeps/_workspace/src/google.golang.org/grpc/health/health.go
new file mode 100644
index 000000000..7930bde7f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/health/health.go
@@ -0,0 +1,49 @@
+// Package health provides some utility functions to health-check a server. The implementation
+// is based on protobuf. Users need to write their own implementations if other IDLs are used.
+package health
+
+import (
+	"sync"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1alpha"
+)
+
+type HealthServer struct {
+	mu sync.Mutex
+	// statusMap stores the serving status of the services this HealthServer monitors.
+	statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
+}
+
+func NewHealthServer() *HealthServer {
+	return &HealthServer{
+		statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus),
+	}
+}
+
+func (s *HealthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if in.Service == "" {
+		// Check the server's overall health status.
+		return &healthpb.HealthCheckResponse{
+			Status: healthpb.HealthCheckResponse_SERVING,
+		}, nil
+	}
+	if status, ok := s.statusMap[in.Service]; ok {
+		return &healthpb.HealthCheckResponse{
+			Status: status,
+		}, nil
+	}
+	return nil, grpc.Errorf(codes.NotFound, "unknown service")
+}
+
+// SetServingStatus is called when one needs to reset the serving status of a service
+// or insert a new service entry into the statusMap.
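+//
+// For example, a server about to go down for maintenance might flip a service
+// to NOT_SERVING first (a hypothetical call; the key is whatever name the
+// operator registered):
+//
+//	hs.SetServingStatus("my.Service", healthpb.HealthCheckResponse_NOT_SERVING)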
+func (s *HealthServer) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
+	s.mu.Lock()
+	s.statusMap[service] = status
+	s.mu.Unlock()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go
new file mode 100644
index 000000000..4f715d35e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go
@@ -0,0 +1,581 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package main
+
+import (
+	"flag"
+	"io"
+	"io/ioutil"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/oauth"
+	"google.golang.org/grpc/grpclog"
+	testpb "google.golang.org/grpc/interop/grpc_testing"
+	"google.golang.org/grpc/metadata"
+)
+
+var (
+	useTLS                = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP")
+	caFile                = flag.String("tls_ca_file", "testdata/ca.pem", "The file containing the CA root cert file")
+	serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file")
+	oauthScope            = flag.String("oauth_scope", "", "The scope for OAuth2 tokens")
+	defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account")
+	serverHost            = flag.String("server_host", "127.0.0.1", "The server host name")
+	serverPort            = flag.Int("server_port", 10000, "The server port number")
+	tlsServerName         = flag.String("server_host_override", "x.test.youtube.com", "The server name used to verify the hostname returned by the TLS handshake if it is not empty. Otherwise, --server_host is used.")
+	testCase              = flag.String("test_case", "large_unary",
+		`Configure different test cases. 
Valid options are: + empty_unary : empty (zero bytes) request and response; + large_unary : single request and (large) response; + client_streaming : request streaming with single response; + server_streaming : single request with response streaming; + ping_pong : full-duplex streaming; + empty_stream : full-duplex streaming with zero message; + timeout_on_sleeping_server: fullduplex streaming; + compute_engine_creds: large_unary with compute engine auth; + service_account_creds: large_unary with service account auth; + jwt_token_creds: large_unary with jwt token auth; + per_rpc_creds: large_unary with per rpc token; + oauth2_auth_token: large_unary with oauth2 token auth; + cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; + cancel_after_first_response: cancellation after receiving 1st message from the server.`) +) + +var ( + reqSizes = []int{27182, 8, 1828, 45904} + respSizes = []int{31415, 9, 2653, 58979} + largeReqSize = 271828 + largeRespSize = 314159 +) + +func newPayload(t testpb.PayloadType, size int) *testpb.Payload { + if size < 0 { + grpclog.Fatalf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") + default: + grpclog.Fatalf("Unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + } +} + +func doEmptyUnaryCall(tc testpb.TestServiceClient) { + reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}) + if err != nil { + grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err) + } + if !proto.Equal(&testpb.Empty{}, reply) { + grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{}) + } + grpclog.Println("EmptyUnaryCall done") +} + +func doLargeUnaryCall(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + t := reply.GetPayload().GetType() + s := len(reply.GetPayload().GetBody()) + if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { + grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) + } + grpclog.Println("LargeUnaryCall done") +} + +func doClientStreaming(tc testpb.TestServiceClient) { + stream, err := tc.StreamingInputCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) + } + var sum int + for _, s := range reqSizes { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, s) + req := &testpb.StreamingInputCallRequest{ + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + } + sum += s + grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum) + + } + reply, err := stream.CloseAndRecv() + if err != nil { + grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + } + if reply.GetAggregatedPayloadSize() != int32(sum) { + grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) + } + grpclog.Println("ClientStreaming done") +} + +func 
doServerStreaming(tc testpb.TestServiceClient) {
+	respParam := make([]*testpb.ResponseParameters, len(respSizes))
+	for i, s := range respSizes {
+		respParam[i] = &testpb.ResponseParameters{
+			Size: proto.Int32(int32(s)),
+		}
+	}
+	req := &testpb.StreamingOutputCallRequest{
+		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
+		ResponseParameters: respParam,
+	}
+	stream, err := tc.StreamingOutputCall(context.Background(), req)
+	if err != nil {
+		grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err)
+	}
+	var rpcStatus error
+	var respCnt int
+	var index int
+	for {
+		reply, err := stream.Recv()
+		if err != nil {
+			rpcStatus = err
+			break
+		}
+		t := reply.GetPayload().GetType()
+		if t != testpb.PayloadType_COMPRESSABLE {
+			grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
+		}
+		size := len(reply.GetPayload().GetBody())
+		if size != int(respSizes[index]) {
+			grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
+		}
+		index++
+		respCnt++
+	}
+	if rpcStatus != io.EOF {
+		grpclog.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus)
+	}
+	if respCnt != len(respSizes) {
+		grpclog.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
+	}
+	grpclog.Println("ServerStreaming done")
+}
+
+func doPingPong(tc testpb.TestServiceClient) {
+	stream, err := tc.FullDuplexCall(context.Background())
+	if err != nil {
+		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
+	}
+	var index int
+	for index < len(reqSizes) {
+		respParam := []*testpb.ResponseParameters{
+			{
+				Size: proto.Int32(int32(respSizes[index])),
+			},
+		}
+		pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index])
+		req := &testpb.StreamingOutputCallRequest{
+			ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
+			ResponseParameters: respParam,
+			Payload:            pl,
+		}
+		if err := stream.Send(req); err != nil {
+			grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
+		}
+		reply, err := stream.Recv()
+		if err != nil {
+			grpclog.Fatalf("%v.Recv() = %v", stream, err)
+		}
+		t := reply.GetPayload().GetType()
+		if t != testpb.PayloadType_COMPRESSABLE {
+			grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
+		}
+		size := len(reply.GetPayload().GetBody())
+		if size != int(respSizes[index]) {
+			grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
+		}
+		index++
+	}
+	if err := stream.CloseSend(); err != nil {
+		grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
+	}
+	if _, err := stream.Recv(); err != io.EOF {
+		grpclog.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
+	}
+	grpclog.Println("Pingpong done")
+}
+
+func doEmptyStream(tc testpb.TestServiceClient) {
+	stream, err := tc.FullDuplexCall(context.Background())
+	if err != nil {
+		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
+	}
+	if err := stream.CloseSend(); err != nil {
+		grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
+	}
+	if _, err := stream.Recv(); err != io.EOF {
+		grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err)
+	}
+	grpclog.Println("Emptystream done")
+}
+
+func doTimeoutOnSleepingServer(tc testpb.TestServiceClient) {
+	ctx, _ := context.WithTimeout(context.Background(), 1*time.Millisecond)
+	stream, err := tc.FullDuplexCall(ctx)
+	if err != nil {
+		if grpc.Code(err) == codes.DeadlineExceeded {
+			grpclog.Println("TimeoutOnSleepingServer done")
+			return
+		}
+		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
+	}
+	pl := 
newPayload(testpb.PayloadType_COMPRESSABLE, 27182) + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + } + if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded { + grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded) + } + grpclog.Println("TimeoutOnSleepingServer done") +} + +func doComputeEngineCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if user != *defaultServiceAccount { + grpclog.Fatalf("Got user name %q, want %q.", user, *defaultServiceAccount) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("ComputeEngineCreds done") +} + +func getServiceAccountJSONKey() []byte { + jsonKey, err := ioutil.ReadFile(*serviceAccountKeyFile) + if err != nil { + grpclog.Fatalf("Failed to read the service account key file: %v", err) + } + return jsonKey +} + +func doServiceAccountCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey() + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("ServiceAccountCreds done") +} + +func doJWTTokenCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey() + user := reply.GetUsername() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + grpclog.Println("JWTtokenCreds done") +} + +func getToken() *oauth2.Token { + jsonKey := getServiceAccountJSONKey() + config, err := google.JWTConfigFromJSON(jsonKey, *oauthScope) + if err != nil { + grpclog.Fatalf("Failed to get the config: %v", err) + } + token, err := config.TokenSource(context.Background()).Token() + if err != nil { + grpclog.Fatalf("Failed to get the token: %v", err) + } + 
return token +} + +func doOauth2TokenCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey() + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("Oauth2TokenCreds done") +} + +func doPerRPCCreds(tc testpb.TestServiceClient) { + jsonKey := getServiceAccountJSONKey() + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + token := getToken() + kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken} + ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) + reply, err := tc.UnaryCall(ctx, req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("PerRPCCreds done") +} + +var ( + testMetadata = metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } +) + +func doCancelAfterBegin(tc testpb.TestServiceClient) { + ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata)) + stream, err := tc.StreamingInputCall(ctx) + if err != nil { + grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) + } + cancel() + _, err = stream.CloseAndRecv() + if grpc.Code(err) != codes.Canceled { + grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled) + } + grpclog.Println("CancelAfterBegin done") +} + +func doCancelAfterFirstResponse(tc testpb.TestServiceClient) { + ctx, cancel := context.WithCancel(context.Background()) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(31415), + }, + } + pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182) + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + grpclog.Fatalf("%v.Recv() = %v", stream, err) + } + cancel() + if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled { + grpclog.Fatalf("%v completed with error code %d, want %d", stream, 
grpc.Code(err), codes.Canceled) + } + grpclog.Println("CancelAfterFirstResponse done") +} + +func main() { + flag.Parse() + serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) + var opts []grpc.DialOption + if *useTLS { + var sn string + if *tlsServerName != "" { + sn = *tlsServerName + } + var creds credentials.TransportAuthenticator + if *caFile != "" { + var err error + creds, err = credentials.NewClientTLSFromFile(*caFile, sn) + if err != nil { + grpclog.Fatalf("Failed to create TLS credentials %v", err) + } + } else { + creds = credentials.NewClientTLSFromCert(nil, sn) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + if *testCase == "compute_engine_creds" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) + } else if *testCase == "service_account_creds" { + jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "jwt_token_creds" { + jwtCreds, err := oauth.NewJWTAccessFromFile(*serviceAccountKeyFile) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "oauth2_auth_token" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(getToken()))) + } + } else { + opts = append(opts, grpc.WithInsecure()) + } + conn, err := grpc.Dial(serverAddr, opts...) + if err != nil { + grpclog.Fatalf("Failed to dial: %v", err) + } + defer conn.Close() + tc := testpb.NewTestServiceClient(conn) + switch *testCase { + case "empty_unary": + doEmptyUnaryCall(tc) + case "large_unary": + doLargeUnaryCall(tc) + case "client_streaming": + doClientStreaming(tc) + case "server_streaming": + doServerStreaming(tc) + case "ping_pong": + doPingPong(tc) + case "empty_stream": + doEmptyStream(tc) + case "timeout_on_sleeping_server": + doTimeoutOnSleepingServer(tc) + case "compute_engine_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute compute_engine_creds test case.") + } + doComputeEngineCreds(tc) + case "service_account_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute service_account_creds test case.") + } + doServiceAccountCreds(tc) + case "jwt_token_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute jwt_token_creds test case.") + } + doJWTTokenCreds(tc) + case "per_rpc_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute per_rpc_creds test case.") + } + doPerRPCCreds(tc) + case "oauth2_auth_token": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. 
TLS is required to execute oauth2_auth_token test case.") + } + doOauth2TokenCreds(tc) + case "cancel_after_begin": + doCancelAfterBegin(tc) + case "cancel_after_first_response": + doCancelAfterFirstResponse(tc) + default: + grpclog.Fatal("Unsupported test case: ", *testCase) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/ca.pem new file mode 100644 index 000000000..999da8977 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/ca.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw +NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh +MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 +Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 +cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv +xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ +QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ +AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz +fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y +5ukK//dCPAzA11YuX2rnex0JhuTQfcI= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.key new file mode 100644 index 000000000..ed00cd500 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ +bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM +BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB +AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I +ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg +omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb +ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB +9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd +MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v +IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc +USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo +VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a +RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.pem new file mode 100644 index 000000000..8e582e571 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 +MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl +c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs +JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO 
+RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 +3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL +BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 +b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ +KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS +wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e +aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go new file mode 100644 index 000000000..b25e98b8e --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -0,0 +1,702 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + Empty + Payload + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. + PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) Enum() *PayloadType { + p := new(PayloadType) + *p = x + return p +} +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (x *PayloadType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") + if err != nil { + return err + } + *x = PayloadType(value) + return nil +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. + Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. 
+ Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} + +func (m *Payload) GetType() PayloadType { + if m != nil && m.Type != nil { + return *m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername *bool `protobuf:"varint,4,opt,name=fill_username" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. + FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope" json:"fill_oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} + +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil && m.FillUsername != nil { + return *m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil && m.FillOauthScope != nil { + return *m.FillOauthScope + } + return false +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. 
+ OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope" json:"oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleResponse) GetUsername() string { + if m != nil && m.Username != nil { + return *m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil && m.OauthScope != nil { + return *m.OauthScope + } + return "" +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. + AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size" json:"aggregated_payload_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} + +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil && m.AggregatedPayloadSize != nil { + return *m.AggregatedPayloadSize + } + return 0 +} + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us" json:"interval_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} + +func (m *ResponseParameters) GetSize() int32 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil && m.IntervalUs != nil { + return *m.IntervalUs + } + return 0 +} + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. 
+ ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} + +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. +type StreamingOutputCallResponse struct { + // Payload to increase response size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) +} + +// Client API for TestService service + +type TestServiceClient interface { + // One empty request followed by one empty response. + EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingOutputCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_StreamingOutputCallClient interface { + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingOutputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingInputCallClient{stream} + return x, nil +} + +type TestService_StreamingInputCallClient interface { + Send(*StreamingInputCallRequest) error + CloseAndRecv() (*StreamingInputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingInputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamingInputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceFullDuplexCallClient{stream} + return x, nil +} + +type TestService_FullDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceFullDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceHalfDuplexCallClient{stream} + return x, nil +} + +type TestService_HalfDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceHalfDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One empty request followed by one empty response. + EmptyCall(context.Context, *Empty) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(TestService_StreamingInputCallServer) error + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(TestService_FullDuplexCallServer) error + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(TestService_HalfDuplexCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(Empty) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(TestServiceServer).EmptyCall(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(SimpleRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(TestServiceServer).UnaryCall(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingOutputCallRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +} + +type TestService_StreamingOutputCallServer interface { + Send(*StreamingOutputCallResponse) error + grpc.ServerStream +} + +type testServiceStreamingOutputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) +} + +type TestService_StreamingInputCallServer interface { + SendAndClose(*StreamingInputCallResponse) error + Recv() (*StreamingInputCallRequest, error) + grpc.ServerStream +} + +type testServiceStreamingInputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { + m := new(StreamingInputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) +} + +type TestService_FullDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceFullDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) +} + +type TestService_HalfDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceHalfDuplexCallServer struct { + grpc.ServerStream +} + +func (x 
*testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EmptyCall", + Handler: _TestService_EmptyCall_Handler, + }, + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingOutputCall", + Handler: _TestService_StreamingOutputCall_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingInputCall", + Handler: _TestService_StreamingInputCall_Handler, + ClientStreams: true, + }, + { + StreamName: "FullDuplexCall", + Handler: _TestService_FullDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "HalfDuplexCall", + Handler: _TestService_HalfDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.proto b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.proto new file mode 100644 index 000000000..b5bfe0537 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.proto @@ -0,0 +1,140 @@ +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto2"; + +package grpc.testing; + +message Empty {} + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + optional PayloadType type = 1; + // Primary contents of payload. + optional bytes body = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + optional PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 response_size = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; + + // Whether SimpleResponse should include username. + optional bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. + optional bool fill_oauth_scope = 5; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + optional Payload payload = 1; + + // The user the request came from, for verifying authentication was + // successful when the client expected it. + optional string username = 2; + + // OAuth scope. + optional string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + optional Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. 
+message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + optional int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + optional int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + optional PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + optional Payload payload = 1; +} + +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. +service TestService { + // One empty request followed by one empty response. + rpc EmptyCall(Empty) returns (Empty); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + rpc StreamingOutputCall(StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + rpc StreamingInputCall(stream StreamingInputCallRequest) + returns (StreamingInputCallResponse); + + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + rpc FullDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. + rpc HalfDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/server.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/server.go new file mode 100644 index 000000000..f781c9d53 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/server.go @@ -0,0 +1,209 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "flag" + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") + certFile = flag.String("tls_cert_file", "testdata/server1.pem", "The TLS cert file") + keyFile = flag.String("tls_key_file", "testdata/server1.key", "The TLS key file") + port = flag.Int("port", 10000, "The server port") +) + +type testServer struct { +} + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return new(testpb.Empty), nil +} + +func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { + if size < 0 { + return nil, fmt.Errorf("requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + return nil, fmt.Errorf("payloadType UNCOMPRESSABLE is not supported") + default: + return nil, fmt.Errorf("unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + }, nil +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + pl, err := newPayload(in.GetResponseType(), in.GetResponseSize()) + if err != nil { + return nil, err + } + return &testpb.SimpleResponse{ + Payload: pl, + }, nil +} + +func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { + cs := args.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := newPayload(args.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + return nil +} + +func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { + var sum int + for { + in, err := stream.Recv() + if err == io.EOF { + return 
stream.SendAndClose(&testpb.StreamingInputCallResponse{ + AggregatedPayloadSize: proto.Int32(int32(sum)), + }) + } + if err != nil { + return err + } + p := in.GetPayload().GetBody() + sum += len(p) + } +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return nil + } + if err != nil { + return err + } + cs := in.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := newPayload(in.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + } +} + +func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { + var msgBuf []*testpb.StreamingOutputCallRequest + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + break + } + if err != nil { + return err + } + msgBuf = append(msgBuf, in) + } + for _, m := range msgBuf { + cs := m.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := newPayload(m.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + } + return nil +} + +func main() { + flag.Parse() + p := strconv.Itoa(*port) + lis, err := net.Listen("tcp", ":"+p) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + var opts []grpc.ServerOption + if *useTLS { + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) + if err != nil { + grpclog.Fatalf("Failed to generate credentials %v", err) + } + opts = []grpc.ServerOption{grpc.Creds(creds)} + } + server := grpc.NewServer(opts...) 
+ testpb.RegisterTestServiceServer(server, &testServer{}) + server.Serve(lis) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/ca.pem new file mode 100644 index 000000000..999da8977 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/ca.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw +NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh +MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 +Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 +cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv +xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ +QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ +AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz +fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y +5ukK//dCPAzA11YuX2rnex0JhuTQfcI= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.key new file mode 100644 index 000000000..ed00cd500 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ +bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM +BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB +AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I +ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg +omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb +ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB +9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd +MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v +IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc +USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo +VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a +RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.pem new file mode 100644 index 000000000..8e582e571 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 +MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl +c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs +JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO +RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 +3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL +BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 
+b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ +KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS +wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e +aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 000000000..5f26abaea --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,146 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package metadata defines the structure of the metadata supported by the gRPC library. +package metadata + +import ( + "encoding/base64" + "fmt" + "strings" + + "golang.org/x/net/context" +) + +const ( + binHdrSuffix = "-bin" +) + +// grpc-http2 requires ASCII header key and value (more detail can be found in +// "Requests" subsection in go/grpc-http2). +func isASCII(s string) bool { + for _, c := range s { + if c > 127 { + return false + } + } + return true +} + +// encodeKeyValue encodes key and value qualified for transmission via gRPC. +// Transmitting binary headers violates HTTP/2 spec. +// TODO(zhaoq): Maybe check if k is ASCII also. +func encodeKeyValue(k, v string) (string, string) { + if isASCII(v) { + return k, v + } + key := strings.ToLower(k + binHdrSuffix) + val := base64.StdEncoding.EncodeToString([]byte(v)) + return key, string(val) +} + +// DecodeKeyValue returns the original key and value corresponding to the +// encoded data in k, v. +func DecodeKeyValue(k, v string) (string, string, error) { + if !strings.HasSuffix(k, binHdrSuffix) { + return k, v, nil + } + key := k[:len(k)-len(binHdrSuffix)] + val, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return "", "", err + } + return key, string(val), nil +} + +// MD is a mapping from metadata keys to values. 
Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +func New(m map[string]string) MD { + md := MD{} + for k, v := range m { + key, val := encodeKeyValue(k, v) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got an odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var k string + for i, s := range kv { + if i%2 == 0 { + k = s + continue + } + key, val := encodeKeyValue(k, s) + md[key] = append(md[key], val) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + out := MD{} + for k, v := range md { + for _, i := range v { + out[k] = append(out[k], i) + } + } + return out +} + +type mdKey struct{} + +// NewContext creates a new context with md attached. +func NewContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdKey{}, md) +} + +// FromContext returns the MD in ctx if it exists. +func FromContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdKey{}).(MD) + return +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata_test.go b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata_test.go new file mode 100644 index 000000000..ef3a1b8c4 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata_test.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package metadata + +import ( + "reflect" + "testing" +) + +const binaryValue = string(128) + +func TestDecodeKeyValue(t *testing.T) { + for _, test := range []struct { + // input + kin string + vin string + // output + kout string + vout string + err error + }{ + {"a", "abc", "a", "abc", nil}, + {"key-bin", "Zm9vAGJhcg==", "key", "foo\x00bar", nil}, + {"key-bin", "woA=", "key", binaryValue, nil}, + } { + k, v, err := DecodeKeyValue(test.kin, test.vin) + if k != test.kout || !reflect.DeepEqual(v, test.vout) || !reflect.DeepEqual(err, test.err) { + t.Fatalf("DecodeKeyValue(%q, %q) = %q, %q, %v, want %q, %q, %v", test.kin, test.vin, k, v, err, test.kout, test.vout, test.err) + } + } +} + +func TestPairsMD(t *testing.T) { + for _, test := range []struct { + // input + kv []string + // output + md MD + }{ + {[]string{}, MD{}}, + {[]string{"k1", "v1", "k2", binaryValue}, New(map[string]string{ + "k1": "v1", + "k2-bin": "woA=", + })}, + } { + md := Pairs(test.kv...) + if !reflect.DeepEqual(md, test.md) { + t.Fatalf("Pairs(%v) = %v, want %v", test.kv, md, test.md) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go b/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go new file mode 100644 index 000000000..e140068a9 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go @@ -0,0 +1,97 @@ +package etcd + +import ( + etcdcl "github.com/coreos/etcd/client" + "golang.org/x/net/context" + "google.golang.org/grpc/naming" +) + +type watcher struct { + wr etcdcl.Watcher + ctx context.Context + cancel context.CancelFunc +} + +func (w *watcher) Next() (*naming.Update, error) { + for { + resp, err := w.wr.Next(w.ctx) + if err != nil { + return nil, err + } + if resp.Node.Dir { + continue + } + var act naming.OP + if resp.Action == "set" { + if resp.PrevNode == nil { + act = naming.Add + } else { + act = naming.Modify + } + } else if resp.Action == "delete" { + act = naming.Delete + } + if act == naming.No { + continue + } + return &naming.Update{ + Op: act, + Key: resp.Node.Key, + Val: resp.Node.Value, + }, nil + } +} + +func (w *watcher) Stop() { + w.cancel() +} + +type resolver struct { + kapi etcdcl.KeysAPI +} + +func (r *resolver) NewWatcher(target string) naming.Watcher { + ctx, cancel := context.WithCancel(context.Background()) + return &watcher{ + wr: r.kapi.Watcher(target, &etcdcl.WatcherOptions{Recursive: true}), + ctx: ctx, + cancel: cancel, + } + +} + +// getNode reports the naming.Update starting from node recursively. +func getNode(node *etcdcl.Node) (updates []*naming.Update) { + for _, v := range node.Nodes { + updates = append(updates, getNode(v)...) + } + if !node.Dir { + entry := &naming.Update{ + Op: naming.Add, + Key: node.Key, + Val: node.Value, + } + updates = []*naming.Update{entry} + } + return +} + +func (r *resolver) Resolve(target string) ([]*naming.Update, error) { + resp, err := r.kapi.Get(context.Background(), target, &etcdcl.GetOptions{Recursive: true}) + if err != nil { + return nil, err + } + updates := getNode(resp.Node) + return updates, nil +} + +// NewResolver creates an etcd-based naming.Resolver. 
+func NewResolver(cfg etcdcl.Config) (naming.Resolver, error) {
+	c, err := etcdcl.New(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return &resolver{
+		kapi: etcdcl.NewKeysAPI(c),
+	}, nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go
new file mode 100644
index 000000000..a1fd33570
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go
@@ -0,0 +1,43 @@
+package naming
+
+// OP defines the corresponding operations for a name resolution change.
+type OP uint8
+
+const (
+	// No indicates there are no changes.
+	No OP = iota
+	// Add indicates a new address is added.
+	Add
+	// Delete indicates an existing address is deleted.
+	Delete
+	// Modify indicates an existing address is modified.
+	Modify
+)
+
+type ServiceConfig interface{}
+
+// Update defines a name resolution change.
+type Update struct {
+	// Op indicates the operation of the update.
+	Op     OP
+	Key    string
+	Val    string
+	Config ServiceConfig
+}
+
+// Resolver does one-shot name resolution and creates a Watcher to
+// watch for future updates.
+type Resolver interface {
+	// Resolve returns the name resolution results.
+	Resolve(target string) ([]*Update, error)
+	// NewWatcher creates a Watcher to watch the changes on target.
+	NewWatcher(target string) Watcher
+}
+
+// Watcher watches the updates for a particular target.
+type Watcher interface {
+	// Next blocks until an update or error happens.
+	Next() (*Update, error)
+	// Stop stops the Watcher.
+	Stop()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 000000000..a0f0b48bb
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,307 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
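Stepping back to the naming package just completed: its Resolver/Watcher pair is consumed as a resolve-then-watch loop. A hedged sketch against the etcd-backed implementation above (the endpoint and the "/services/echo" key space are assumptions for illustration):

package main

import (
	"log"

	etcdcl "github.com/coreos/etcd/client"
	"google.golang.org/grpc/naming/etcd"
)

func main() {
	// Assumed local etcd endpoint; adjust for the target environment.
	r, err := etcd.NewResolver(etcdcl.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	// One-shot resolution of the members currently under the key.
	updates, err := r.Resolve("/services/echo")
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range updates {
		log.Printf("initial: op=%d key=%s val=%s", u.Op, u.Key, u.Val)
	}
	// Then block on the Watcher for incremental Add/Delete/Modify events.
	w := r.NewWatcher("/services/echo")
	defer w.Stop()
	for {
		u, err := w.Next()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("update: op=%d key=%s val=%s", u.Op, u.Key, u.Val)
	}
}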
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// String returns the name of the Codec implementation. The returned
+	// string will be used as part of content type in transmission.
+	String() string
+}
+
+// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type protoCodec struct{}
+
+func (protoCodec) Marshal(v interface{}) ([]byte, error) {
+	return proto.Marshal(v.(proto.Message))
+}
+
+func (protoCodec) Unmarshal(data []byte, v interface{}) error {
+	return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (protoCodec) String() string {
+	return "proto"
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server. If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed. after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo)
+}
+
+type beforeCall func(c *callInfo) error
+
+func (o beforeCall) before(c *callInfo) error { return o(c) }
+func (o beforeCall) after(c *callInfo)        {}
+
+type afterCall func(c *callInfo)
+
+func (o afterCall) before(c *callInfo) error { return nil }
+func (o afterCall) after(c *callInfo)        { o(c) }
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return afterCall(func(c *callInfo) {
+		*md = c.headerMD
+	})
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return afterCall(func(c *callInfo) {
+		*md = c.trailerMD
+	})
+}
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+	compressionNone payloadFormat = iota // no compression
+	compressionFlate
+	// More formats
+)
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+	s io.Reader
+}
+
+// msgFixedHeader defines the header of a gRPC message (go/grpc-wirefmt).
+type msgFixedHeader struct {
+	T      payloadFormat
+	Length uint32
+}
+
+// recvMsg reads a complete gRPC message from the stream. It blocks if the
+// message is not yet complete. It returns the message and its type;
+// io.EOF is returned with a nil msg and 0 pf if the entire stream is done.
+// Any other non-nil error is returned if something goes wrong while reading.
+func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
+	var hdr msgFixedHeader
+	if err := binary.Read(p.s, binary.BigEndian, &hdr); err != nil {
+		return 0, nil, err
+	}
+	if hdr.Length == 0 {
+		return hdr.T, nil, nil
+	}
+	msg = make([]byte, int(hdr.Length))
+	if _, err := io.ReadFull(p.s, msg); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return 0, nil, err
+	}
+	return hdr.T, msg, nil
+}
+
+// encode serializes msg and prepends the message header.
If msg is nil, it
+// generates a message header with zero message length.
+func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
+	var buf bytes.Buffer
+	// Write message fixed header.
+	buf.WriteByte(uint8(pf))
+	var b []byte
+	var length uint32
+	if msg != nil {
+		var err error
+		// TODO(zhaoq): optimize to reduce memory alloc and copying.
+		b, err = c.Marshal(msg)
+		if err != nil {
+			return nil, err
+		}
+		length = uint32(len(b))
+	}
+	var szHdr [4]byte
+	binary.BigEndian.PutUint32(szHdr[:], length)
+	buf.Write(szHdr[:])
+	buf.Write(b)
+	return buf.Bytes(), nil
+}
+
+func recv(p *parser, c Codec, m interface{}) error {
+	pf, d, err := p.recvMsg()
+	if err != nil {
+		return err
+	}
+	switch pf {
+	case compressionNone:
+		if err := c.Unmarshal(d, m); err != nil {
+			return Errorf(codes.Internal, "grpc: %v", err)
+		}
+	default:
+		return Errorf(codes.Internal, "grpc: compression is not supported yet.")
+	}
+	return nil
+}
+
+// rpcError defines the status from an RPC.
+type rpcError struct {
+	code codes.Code
+	desc string
+}
+
+func (e rpcError) Error() string {
+	return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc)
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+func Code(err error) codes.Code {
+	if err == nil {
+		return codes.OK
+	}
+	if e, ok := err.(rpcError); ok {
+		return e.code
+	}
+	return codes.Unknown
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+	if c == codes.OK {
+		return nil
+	}
+	return rpcError{
+		code: c,
+		desc: fmt.Sprintf(format, a...),
+	}
+}
+
+// toRPCErr converts an error into an rpcError.
+func toRPCErr(err error) error {
+	switch e := err.(type) {
+	case transport.StreamError:
+		return rpcError{
+			code: e.Code,
+			desc: e.Desc,
+		}
+	case transport.ConnectionError:
+		return rpcError{
+			code: codes.Internal,
+			desc: e.Desc,
+		}
+	}
+	return Errorf(codes.Unknown, "%v", err)
+}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate errors returned by server applications.
+func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +const ( + // how long to wait after the first failure before retrying + baseDelay = 1.0 * time.Second + // upper bound of backoff delay + maxDelay = 120 * time.Second + // backoff increases by this factor on each retry + backoffFactor = 1.6 + // backoff is randomized downwards by this factor + backoffJitter = 0.2 +) + +func backoff(retries int) (t time.Duration) { + if retries == 0 { + return baseDelay + } + backoff, max := float64(baseDelay), float64(maxDelay) + for backoff < max && retries > 0 { + backoff *= backoffFactor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + backoffJitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util_test.go b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util_test.go new file mode 100644 index 000000000..1a2968411 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util_test.go @@ -0,0 +1,190 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
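Two details of rpc_util.go above are worth pinning down with a sketch (in-package, illustrative code; not part of the vendored files). First, the wire framing read by parser.recvMsg is one payloadFormat byte followed by a 4-byte big-endian length and then the body. Second, the backoff schedule grows from baseDelay by backoffFactor per retry and is capped at maxDelay, so ignoring jitter the delays run roughly 1s, 1.6s, 2.56s, 4.10s, and so on, saturating at 120s from about the 11th retry on:

package grpc

import (
	"bytes"
	"fmt"
)

// exampleFraming decodes one hand-built frame: payloadFormat 0
// (compressionNone), big-endian length 2, body "hi".
func exampleFraming() {
	wire := []byte{0, 0, 0, 0, 2, 'h', 'i'}
	p := &parser{s: bytes.NewReader(wire)}
	pf, body, err := p.recvMsg()
	fmt.Println(pf, string(body), err) // 0 hi <nil>
}

// printBackoffSchedule prints the jittered delay per retry count.
func printBackoffSchedule() {
	for i := 0; i <= 11; i++ {
		fmt.Printf("retry %2d: %v\n", i, backoff(i))
	}
}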
+ * + */ + +package grpc + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + perfpb "google.golang.org/grpc/test/codec_perf" + "google.golang.org/grpc/transport" +) + +func TestSimpleParsing(t *testing.T) { + for _, test := range []struct { + // input + p []byte + // outputs + err error + b []byte + pt payloadFormat + }{ + {nil, io.EOF, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 0}, nil, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 1, 'a'}, nil, []byte{'a'}, compressionNone}, + {[]byte{1, 0}, io.ErrUnexpectedEOF, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 10, 'a'}, io.ErrUnexpectedEOF, nil, compressionNone}, + } { + buf := bytes.NewReader(test.p) + parser := &parser{buf} + pt, b, err := parser.recvMsg() + if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { + t.Fatalf("parser{%v}.recvMsg() = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err) + } + } +} + +func TestMultipleParsing(t *testing.T) { + // Set a byte stream consists of 3 messages with their headers. + p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} + b := bytes.NewReader(p) + parser := &parser{b} + + wantRecvs := []struct { + pt payloadFormat + data []byte + }{ + {compressionNone, []byte("a")}, + {compressionNone, []byte("bc")}, + {compressionNone, []byte("d")}, + } + for i, want := range wantRecvs { + pt, data, err := parser.recvMsg() + if err != nil || pt != want.pt || !reflect.DeepEqual(data, want.data) { + t.Fatalf("after %d calls, parser{%v}.recvMsg() = %v, %v, %v\nwant %v, %v, ", + i, p, pt, data, err, want.pt, want.data) + } + } + + pt, data, err := parser.recvMsg() + if err != io.EOF { + t.Fatalf("after %d recvMsgs calls, parser{%v}.recvMsg() = %v, %v, %v\nwant _, _, %v", + len(wantRecvs), p, pt, data, err, io.EOF) + } +} + +func TestEncode(t *testing.T) { + for _, test := range []struct { + // input + msg proto.Message + pt payloadFormat + // outputs + b []byte + err error + }{ + {nil, compressionNone, []byte{0, 0, 0, 0, 0}, nil}, + } { + b, err := encode(protoCodec{}, test.msg, test.pt) + if err != test.err || !bytes.Equal(b, test.b) { + t.Fatalf("encode(_, _, %d) = %v, %v\nwant %v, %v", test.pt, b, err, test.b, test.err) + } + } +} + +func TestToRPCErr(t *testing.T) { + for _, test := range []struct { + // input + errIn error + // outputs + errOut error + }{ + {transport.StreamErrorf(codes.Unknown, ""), Errorf(codes.Unknown, "")}, + {transport.ErrConnClosing, Errorf(codes.Internal, transport.ErrConnClosing.Desc)}, + } { + err := toRPCErr(test.errIn) + if err != test.errOut { + t.Fatalf("toRPCErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) + } + } +} + +func TestContextErr(t *testing.T) { + for _, test := range []struct { + // input + errIn error + // outputs + errOut transport.StreamError + }{ + {context.DeadlineExceeded, transport.StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)}, + {context.Canceled, transport.StreamErrorf(codes.Canceled, "%v", context.Canceled)}, + } { + err := transport.ContextErr(test.errIn) + if err != test.errOut { + t.Fatalf("ContextErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) + } + } +} + +// bmEncode benchmarks encoding a Protocol Buffer message containing mSize +// bytes. 
+func bmEncode(b *testing.B, mSize int) { + msg := &perfpb.Buffer{Body: make([]byte, mSize)} + encoded, _ := encode(protoCodec{}, msg, compressionNone) + encodedSz := int64(len(encoded)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + encode(protoCodec{}, msg, compressionNone) + } + b.SetBytes(encodedSz) +} + +func BenchmarkEncode1B(b *testing.B) { + bmEncode(b, 1) +} + +func BenchmarkEncode1KiB(b *testing.B) { + bmEncode(b, 1024) +} + +func BenchmarkEncode8KiB(b *testing.B) { + bmEncode(b, 8*1024) +} + +func BenchmarkEncode64KiB(b *testing.B) { + bmEncode(b, 64*1024) +} + +func BenchmarkEncode512KiB(b *testing.B) { + bmEncode(b, 512*1024) +} + +func BenchmarkEncode1MiB(b *testing.B) { + bmEncode(b, 1024*1024) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/server.go b/Godeps/_workspace/src/google.golang.org/grpc/server.go new file mode 100644 index 000000000..feb298875 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/server.go @@ -0,0 +1,463 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "errors" + "fmt" + "io" + "net" + "reflect" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/transport" +) + +type methodHandler func(srv interface{}, ctx context.Context, codec Codec, buf []byte) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. 
+	HandlerType interface{}
+	Methods     []MethodDesc
+	Streams     []StreamDesc
+}
+
+// service consists of the information of the server serving this service and
+// the methods in this service.
+type service struct {
+	server interface{} // the server for service methods
+	md     map[string]*MethodDesc
+	sd     map[string]*StreamDesc
+}
+
+// Server is a gRPC server to serve RPC requests.
+type Server struct {
+	opts  options
+	mu    sync.Mutex
+	lis   map[net.Listener]bool
+	conns map[transport.ServerTransport]bool
+	m     map[string]*service // service name -> service info
+}
+
+type options struct {
+	creds                credentials.Credentials
+	codec                Codec
+	maxConcurrentStreams uint32
+}
+
+// A ServerOption sets options.
+type ServerOption func(*options)
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+func CustomCodec(codec Codec) ServerOption {
+	return func(o *options) {
+		o.codec = codec
+	}
+}
+
+// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+// of concurrent streams to each ServerTransport.
+func MaxConcurrentStreams(n uint32) ServerOption {
+	return func(o *options) {
+		o.maxConcurrentStreams = n
+	}
+}
+
+// Creds returns a ServerOption that sets credentials for server connections.
+func Creds(c credentials.Credentials) ServerOption {
+	return func(o *options) {
+		o.creds = c
+	}
+}
+
+// NewServer creates a gRPC server which has no service registered and has not
+// started to accept requests yet.
+func NewServer(opt ...ServerOption) *Server {
+	var opts options
+	for _, o := range opt {
+		o(&opts)
+	}
+	if opts.codec == nil {
+		// Set the default codec.
+		opts.codec = protoCodec{}
+	}
+	return &Server{
+		lis:   make(map[net.Listener]bool),
+		opts:  opts,
+		conns: make(map[transport.ServerTransport]bool),
+		m:     make(map[string]*service),
+	}
+}
+
+// RegisterService registers a service and its implementation with the gRPC
+// server. It is called from the IDL generated code. This must be called before
+// invoking Serve.
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+	ht := reflect.TypeOf(sd.HandlerType).Elem()
+	st := reflect.TypeOf(ss)
+	if !st.Implements(ht) {
+		grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+	}
+	s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if _, ok := s.m[sd.ServiceName]; ok {
+		grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+	}
+	srv := &service{
+		server: ss,
+		md:     make(map[string]*MethodDesc),
+		sd:     make(map[string]*StreamDesc),
+	}
+	for i := range sd.Methods {
+		d := &sd.Methods[i]
+		srv.md[d.MethodName] = d
+	}
+	for i := range sd.Streams {
+		d := &sd.Streams[i]
+		srv.sd[d.StreamName] = d
+	}
+	s.m[sd.ServiceName] = srv
+}
+
+var (
+	// ErrServerStopped indicates that the operation is now illegal because of
+	// the server being stopped.
+	ErrServerStopped = errors.New("grpc: the server has been stopped")
+)
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails.
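RegisterService and the ServiceDesc/MethodDesc tables above are normally populated by protoc-generated code. The following standalone sketch shows what that boils down to for a hypothetical one-method service; the demo.Echo name, the Ping method, and the reuse of the codec_perf Buffer message (vendored later in this diff) are all assumptions for illustration. The handler body also drives SendHeader and SetTrailer, which are defined near the bottom of this file:

package main

import (
	"net"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	perfpb "google.golang.org/grpc/test/codec_perf"
)

// EchoServer is a hypothetical service interface; generated code would
// normally declare this from the .proto definition.
type EchoServer interface {
	Ping(ctx context.Context, in *perfpb.Buffer) (*perfpb.Buffer, error)
}

// pingHandler has the methodHandler shape expected by MethodDesc.Handler.
func pingHandler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
	in := new(perfpb.Buffer)
	if err := codec.Unmarshal(buf, in); err != nil {
		return nil, err
	}
	return srv.(EchoServer).Ping(ctx, in)
}

var echoDesc = grpc.ServiceDesc{
	ServiceName: "demo.Echo", // hypothetical fully qualified service name
	HandlerType: (*EchoServer)(nil),
	Methods:     []grpc.MethodDesc{{MethodName: "Ping", Handler: pingHandler}},
}

type echoImpl struct{}

// Ping echoes its input and demonstrates per-RPC header/trailer metadata.
func (echoImpl) Ping(ctx context.Context, in *perfpb.Buffer) (*perfpb.Buffer, error) {
	// Header metadata is written before the response message.
	if err := grpc.SendHeader(ctx, metadata.Pairs("served-by", "echo-1")); err != nil {
		return nil, err
	}
	// Trailer metadata is buffered and sent with the final RPC status.
	grpc.SetTrailer(ctx, metadata.Pairs("cost", "7ms"))
	return in, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer(grpc.MaxConcurrentStreams(100))
	s.RegisterService(&echoDesc, echoImpl{})
	s.Serve(lis)
}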
+func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + if s.lis == nil { + s.mu.Unlock() + return ErrServerStopped + } + s.lis[lis] = true + s.mu.Unlock() + defer func() { + lis.Close() + s.mu.Lock() + delete(s.lis, lis) + s.mu.Unlock() + }() + for { + c, err := lis.Accept() + if err != nil { + return err + } + var authInfo credentials.AuthInfo + if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { + c, authInfo, err = creds.ServerHandshake(c) + if err != nil { + grpclog.Println("grpc: Server.Serve failed to complete security handshake.") + continue + } + } + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + c.Close() + return nil + } + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) + if err != nil { + s.mu.Unlock() + c.Close() + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + continue + } + s.conns[st] = true + s.mu.Unlock() + + go func() { + st.HandleStreams(func(stream *transport.Stream) { + s.handleStream(st, stream) + }) + s.mu.Lock() + delete(s.conns, st) + s.mu.Unlock() + }() + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error { + p, err := encode(s.opts.codec, msg, pf) + if err != nil { + // This typically indicates a fatal issue (e.g., memory + // corruption or hardware faults) the application program + // cannot handle. + // + // TODO(zhaoq): There exist other options also such as only closing the + // faulty stream locally and remotely (Other streams can keep going). Find + // the optimal option. + grpclog.Fatalf("grpc: Server failed to encode response %v", err) + } + return t.Write(stream, p, opts) +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) (err error) { + var traceInfo traceInfo + if EnableTracing { + traceInfo.tr = trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + defer traceInfo.tr.Finish() + traceInfo.firstLine.client = false + traceInfo.tr.LazyLog(&traceInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + traceInfo.tr.SetError() + } + }() + } + p := &parser{s: stream} + for { + pf, req, err := p.recvMsg() + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
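+				// The whole transport is broken; the serving goroutine in
+				// Serve tears the connection down, so there is no usable
+				// stream left to write a status to.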
+ case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) + } + return err + } + if traceInfo.tr != nil { + traceInfo.tr.LazyLog(&payload{sent: false, msg: req}, true) + } + switch pf { + case compressionNone: + statusCode := codes.OK + statusDesc := "" + reply, appErr := md.Handler(srv.server, stream.Context(), s.opts.codec, req) + if appErr != nil { + if err, ok := appErr.(rpcError); ok { + statusCode = err.code + statusDesc = err.desc + } else { + statusCode = convertCode(appErr) + statusDesc = appErr.Error() + } + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err + } + return nil + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: + statusCode = codes.Unknown + statusDesc = err.Error() + } + return err + } + if traceInfo.tr != nil { + traceInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) + default: + panic(fmt.Sprintf("payload format to be supported: %d", pf)) + } + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) (err error) { + ss := &serverStream{ + t: t, + s: stream, + p: &parser{s: stream}, + codec: s.opts.codec, + tracing: EnableTracing, + } + if ss.tracing { + ss.traceInfo.tr = trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ss.traceInfo.firstLine.client = false + ss.traceInfo.tr.LazyLog(&ss.traceInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + ss.traceInfo.tr.Finish() + ss.traceInfo.tr = nil + ss.mu.Unlock() + }() + } + if appErr := sd.Handler(srv.server, ss); appErr != nil { + if err, ok := appErr.(rpcError); ok { + ss.statusCode = err.code + ss.statusDesc = err.desc + } else { + ss.statusCode = convertCode(appErr) + ss.statusDesc = appErr.Error() + } + } + return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } + return + } + service := sm[:pos] + method := sm[pos+1:] + srv, ok := s.m[service] + if !ok { + if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } + return + } + // Unary RPC or Streaming RPC? 
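+	// A method registered in srv.md is unary; one registered in srv.sd is
+	// streaming. For a hypothetical request path "/demo.Echo/Ping", service
+	// is "demo.Echo" and method is "Ping" at this point.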
+ if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd) + return + } + if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } +} + +// Stop stops the gRPC server. Once Stop returns, the server stops accepting +// connection requests and closes all the connected connections. +func (s *Server) Stop() { + s.mu.Lock() + listeners := s.lis + s.lis = nil + cs := s.conns + s.conns = nil + s.mu.Unlock() + for lis := range listeners { + lis.Close() + } + for c := range cs { + c.Close() + } +} + +// TestingCloseConns closes all exiting transports but keeps s.lis accepting new +// connections. This is for test only now. +func (s *Server) TestingCloseConns() { + s.mu.Lock() + for c := range s.conns { + c.Close() + } + s.conns = make(map[transport.ServerTransport]bool) + s.mu.Unlock() +} + +// SendHeader sends header metadata. It may be called at most once from a unary +// RPC handler. The ctx is the RPC handler's Context or one derived from it. +func SendHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) + } + t := stream.ServerTransport() + if t == nil { + grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) + } + return t.WriteHeader(stream, md) +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// It may be called at most once from a unary RPC handler. The ctx is the RPC +// handler's Context or one derived from it. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/stream.go b/Godeps/_workspace/src/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..5c99bffc6 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/stream.go @@ -0,0 +1,346 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"errors"
+	"io"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+type streamHandler func(srv interface{}, stream ServerStream) error
+
+// StreamDesc represents a streaming RPC service's method specification.
+type StreamDesc struct {
+	StreamName string
+	Handler    streamHandler
+
+	// At least one of these is true.
+	ServerStreams bool
+	ClientStreams bool
+}
+
+// Stream defines the common interface a client or server stream has to satisfy.
+type Stream interface {
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg blocks until it sends m, the stream is done, or the stream
+	// breaks.
+	// On error, it aborts the stream and returns an RPC status on client
+	// side. On server side, it simply returns the error to the caller.
+	// SendMsg is called by generated code.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message or the stream is
+	// done. On client side, it returns io.EOF when the stream is done. On
+	// any other error, it aborts the stream and returns an RPC status. On
+	// server side, it simply returns the error to the caller.
+	RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the interface a client stream has to satisfy.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server. It must be called
+	// after stream.Recv() returns a non-nil error (including io.EOF) for
+	// bi-directional streaming and server streaming, or after stream.CloseAndRecv()
+	// returns for client streaming, in order to receive trailer metadata if
+	// present. Otherwise, it could return an empty MD even though a trailer
+	// is present.
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is encountered.
+	CloseSend() error
+	Stream
+}
+
+// NewClientStream creates a new Stream for the client side. This is called
+// by generated code.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
+ callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + } + cs := &clientStream{ + desc: desc, + codec: cc.dopts.codec, + tracing: EnableTracing, + } + if cs.tracing { + cs.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + cs.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false) + } + t, _, err := cc.wait(ctx, 0) + if err != nil { + return nil, toRPCErr(err) + } + s, err := t.NewStream(ctx, callHdr) + if err != nil { + return nil, toRPCErr(err) + } + cs.t = t + cs.s = s + cs.p = &parser{s: s} + return cs, nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + + tracing bool // set to EnableTracing when the clientStream is created. + + mu sync.Mutex // protects traceInfo + // traceInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. + traceInfo traceInfo +} + +func (cs *clientStream) Context() context.Context { + return cs.s.Context() +} + +func (cs *clientStream) Header() (metadata.MD, error) { + m, err := cs.s.Header() + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + cs.t.CloseStream(cs.s, err) + } + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + return cs.s.Trailer() +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.traceInfo.tr != nil { + cs.traceInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } + defer func() { + if err == nil || err == io.EOF { + return + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.t.CloseStream(cs.s, err) + } + err = toRPCErr(err) + }() + out, err := encode(cs.codec, m, compressionNone) + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: %v", err) + } + return cs.t.Write(cs.s, out, &transport.Options{Last: false}) +} + +func (cs *clientStream) RecvMsg(m interface{}) (err error) { + err = recv(cs.p, cs.codec, m) + defer func() { + // err != nil indicates the termination of the stream. + if err != nil { + cs.finish(err) + } + }() + if err == nil { + if cs.tracing { + cs.mu.Lock() + if cs.traceInfo.tr != nil { + cs.traceInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + cs.mu.Unlock() + } + if !cs.desc.ClientStreams || cs.desc.ServerStreams { + return + } + // Special handling for client streaming rpc. + err = recv(cs.p, cs.codec, m) + cs.t.CloseStream(cs.s, err) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + if cs.s.StatusCode() == codes.OK { + return nil + } + return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + } + return toRPCErr(err) + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.t.CloseStream(cs.s, err) + } + if err == io.EOF { + if cs.s.StatusCode() == codes.OK { + // Returns io.EOF to indicate the end of the stream. 
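+			// The bare return propagates io.EOF itself: an OK status
+			// combined with io.EOF is the clean end of the stream, not
+			// an RPC failure.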
+ return + } + return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + } + return toRPCErr(err) +} + +func (cs *clientStream) CloseSend() (err error) { + err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + if err == nil || err == io.EOF { + return + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.t.CloseStream(cs.s, err) + } + err = toRPCErr(err) + return +} + +func (cs *clientStream) finish(err error) { + if !cs.tracing { + return + } + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.traceInfo.tr != nil { + if err == nil || err == io.EOF { + cs.traceInfo.tr.LazyPrintf("RPC: [OK]") + } else { + cs.traceInfo.tr.LazyPrintf("RPC: [%v]", err) + cs.traceInfo.tr.SetError() + } + cs.traceInfo.tr.Finish() + cs.traceInfo.tr = nil + } +} + +// ServerStream defines the interface a server stream has to satisfy. +type ServerStream interface { + // SendHeader sends the header metadata. It should not be called + // after SendProto. It fails if called multiple times or if + // called after SendProto. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the + // RPC status. + SetTrailer(metadata.MD) + Stream +} + +// serverStream implements a server side Stream. +type serverStream struct { + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + statusCode codes.Code + statusDesc string + + tracing bool // set to EnableTracing when the serverStream is created. + + mu sync.Mutex // protects traceInfo + // traceInfo.tr is set when the serverStream is created (if EnableTracing is true), + // and is set to nil when the serverStream's finish method is called. + traceInfo traceInfo +} + +func (ss *serverStream) Context() context.Context { + return ss.s.Context() +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + return ss.t.WriteHeader(ss.s, md) +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) + return +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.tracing { + ss.mu.Lock() + if err == nil { + ss.traceInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + + ss.mu.Unlock() + } + }() + out, err := encode(ss.codec, m, compressionNone) + if err != nil { + err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) + return err + } + return ss.t.Write(ss.s, out, &transport.Options{Last: false}) +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.tracing { + ss.mu.Lock() + if err == nil { + ss.traceInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + ss.mu.Unlock() + } + }() + return recv(ss.p, ss.codec, m) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/codec_perf/perf.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/test/codec_perf/perf.pb.go new file mode 100644 index 000000000..5f72c5570 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/codec_perf/perf.pb.go @@ -0,0 +1,42 @@ +// Code generated by protoc-gen-go. +// source: perf.proto +// DO NOT EDIT! + +/* +Package codec_perf is a generated protocol buffer package. 
+ +It is generated from these files: + perf.proto + +It has these top-level messages: + Buffer +*/ +package codec_perf + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// Buffer is a message that contains a body of bytes that is used to exercise +// encoding and decoding overheads. +type Buffer struct { + Body []byte `protobuf:"bytes,1,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Buffer) Reset() { *m = Buffer{} } +func (m *Buffer) String() string { return proto.CompactTextString(m) } +func (*Buffer) ProtoMessage() {} + +func (m *Buffer) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func init() { +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/codec_perf/perf.proto b/Godeps/_workspace/src/google.golang.org/grpc/test/codec_perf/perf.proto new file mode 100644 index 000000000..4cf561f7f --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/codec_perf/perf.proto @@ -0,0 +1,11 @@ +// Messages used for performance tests that may not reference grpc directly for +// reasons of import cycles. +syntax = "proto2"; + +package codec.perf; + +// Buffer is a message that contains a body of bytes that is used to exercise +// encoding and decoding overheads. +message Buffer { + optional bytes body = 1; +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go b/Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go new file mode 100644 index 000000000..46a6fe3a3 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go @@ -0,0 +1,1091 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
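Before the end-to-end tests below, here is a hedged sketch of driving the raw ClientStream API from stream.go directly; generated stubs normally wrap this. The "/demo.Echo/Chat" method path and the reuse of the codec_perf Buffer message are assumptions for illustration:

package main

import (
	"io"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	perfpb "google.golang.org/grpc/test/codec_perf"
)

// chat sends one message on a hypothetical bi-directional stream, half-closes,
// then drains responses until the server finishes.
func chat(ctx context.Context, conn *grpc.ClientConn) {
	desc := &grpc.StreamDesc{StreamName: "Chat", ServerStreams: true, ClientStreams: true}
	stream, err := grpc.NewClientStream(ctx, desc, conn, "/demo.Echo/Chat")
	if err != nil {
		log.Fatal(err)
	}
	if err := stream.SendMsg(&perfpb.Buffer{Body: []byte("hello")}); err != nil {
		log.Fatal(err)
	}
	if err := stream.CloseSend(); err != nil {
		log.Fatal(err)
	}
	for {
		out := new(perfpb.Buffer)
		if err := stream.RecvMsg(out); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
	}
	// Trailer is only guaranteed once RecvMsg has returned a non-nil error.
	log.Println(stream.Trailer())
}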
+ * + */ + +package grpc_test + +import ( + "fmt" + "io" + "math" + "net" + "reflect" + "runtime" + "sync" + "syscall" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1alpha" + "google.golang.org/grpc/metadata" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +var ( + testMetadata = metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } + testAppUA = "myApp1/1.0 myApp2/0.9" +) + +type testServer struct { + security string // indicate the authentication protocol used by this server. +} + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if md, ok := metadata.FromContext(ctx); ok { + // For testing purpose, returns an error if there is attached metadata other than + // the user agent set by the client application. + if _, ok := md["user-agent"]; !ok { + return nil, grpc.Errorf(codes.DataLoss, "got extra metadata") + } + var str []string + for _, entry := range md["user-agent"] { + str = append(str, "ua", entry) + } + grpc.SendHeader(ctx, metadata.Pairs(str...)) + } + return new(testpb.Empty), nil +} + +func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { + if size < 0 { + return nil, fmt.Errorf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported") + default: + return nil, fmt.Errorf("Unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + }, nil +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + md, ok := metadata.FromContext(ctx) + if ok { + if err := grpc.SendHeader(ctx, md); err != nil { + return nil, fmt.Errorf("grpc.SendHeader(%v, %v) = %v, want %v", ctx, md, err, nil) + } + grpc.SetTrailer(ctx, md) + } + if s.security != "" { + // Check Auth info + authInfo, ok := credentials.FromContext(ctx) + if !ok { + return nil, fmt.Errorf("Failed to get AuthInfo from ctx.") + } + var authType, serverName string + switch info := authInfo.(type) { + case credentials.TLSInfo: + authType = info.AuthType() + serverName = info.State.ServerName + default: + return nil, fmt.Errorf("Unknown AuthInfo type") + } + if authType != s.security { + return nil, fmt.Errorf("Wrong auth type: got %q, want %q", authType, s.security) + } + if serverName != "x.test.youtube.com" { + return nil, fmt.Errorf("Unknown server name %q", serverName) + } + } + + // Simulate some service delay. + time.Sleep(time.Second) + + payload, err := newPayload(in.GetResponseType(), in.GetResponseSize()) + if err != nil { + return nil, err + } + + return &testpb.SimpleResponse{ + Payload: payload, + }, nil +} + +func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { + if md, ok := metadata.FromContext(stream.Context()); ok { + // For testing purpose, returns an error if there is attached metadata. 
+ if len(md) > 0 { + return grpc.Errorf(codes.DataLoss, "got extra metadata") + } + } + cs := args.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(args.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + return nil +} + +func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { + var sum int + for { + in, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&testpb.StreamingInputCallResponse{ + AggregatedPayloadSize: proto.Int32(int32(sum)), + }) + } + if err != nil { + return err + } + p := in.GetPayload().GetBody() + sum += len(p) + } +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + md, ok := metadata.FromContext(stream.Context()) + if ok { + if err := stream.SendHeader(md); err != nil { + return fmt.Errorf("%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + stream.SetTrailer(md) + } + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return nil + } + if err != nil { + return err + } + cs := in.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(in.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + } +} + +func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { + var msgBuf []*testpb.StreamingOutputCallRequest + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. 
+ break + } + if err != nil { + return err + } + msgBuf = append(msgBuf, in) + } + for _, m := range msgBuf { + cs := m.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(m.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + } + return nil +} + +const tlsDir = "testdata/" + +func TestDialTimeout(t *testing.T) { + conn, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTimeout(time.Millisecond), grpc.WithBlock(), grpc.WithInsecure()) + if err == nil { + conn.Close() + } + if err != grpc.ErrClientConnTimeout { + t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, grpc.ErrClientConnTimeout) + } +} + +func TestTLSDialTimeout(t *testing.T) { + creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + conn, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTransportCredentials(creds), grpc.WithTimeout(time.Millisecond), grpc.WithBlock()) + if err == nil { + conn.Close() + } + if err != grpc.ErrClientConnTimeout { + t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, grpc.ErrClientConnTimeout) + } +} + +func TestCredentialsMisuse(t *testing.T) { + creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + // Two conflicting credential configurations + if _, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTransportCredentials(creds), grpc.WithTimeout(time.Millisecond), grpc.WithBlock(), grpc.WithInsecure()); err != grpc.ErrCredentialsMisuse { + t.Fatalf("grpc.Dial(_, _) = _, %v, want _, %v", err, grpc.ErrCredentialsMisuse) + } + // security info on insecure connection + if _, err := grpc.Dial("Non-Existent.Server:80", grpc.WithPerRPCCredentials(creds), grpc.WithTimeout(time.Millisecond), grpc.WithBlock(), grpc.WithInsecure()); err != grpc.ErrCredentialsMisuse { + t.Fatalf("grpc.Dial(_, _) = _, %v, want _, %v", err, grpc.ErrCredentialsMisuse) + } +} + +func TestReconnectTimeout(t *testing.T) { + lis, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + t.Fatalf("Failed to parse listener address: %v", err) + } + addr := "localhost:" + port + conn, err := grpc.Dial(addr, grpc.WithTimeout(5*time.Second), grpc.WithBlock(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Failed to dial to the server %q: %v", addr, err) + } + // Close unaccepted connection (i.e., conn). + lis.Close() + tc := testpb.NewTestServiceClient(conn) + waitC := make(chan struct{}) + go func() { + defer close(waitC) + argSize := 271828 + respSize := 314159 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(respSize)), + Payload: payload, + } + if _, err := tc.UnaryCall(context.Background(), req); err == nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, , want _, non-nil") + } + }() + // Block untill reconnect times out. 
+ <-waitC + if err := conn.Close(); err != grpc.ErrClientConnClosing { + t.Fatalf("%v.Close() = %v, want %v", conn, err, grpc.ErrClientConnClosing) + } +} + +func unixDialer(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) +} + +type env struct { + network string // The type of network such as tcp, unix, etc. + dialer func(addr string, timeout time.Duration) (net.Conn, error) + security string // The security protocol such as TLS, SSH, etc. +} + +func listTestEnv() []env { + if runtime.GOOS == "windows" { + return []env{{"tcp", nil, ""}, {"tcp", nil, "tls"}} + } + return []env{{"tcp", nil, ""}, {"tcp", nil, "tls"}, {"unix", unixDialer, ""}, {"unix", unixDialer, "tls"}} +} + +func setUp(t *testing.T, hs *health.HealthServer, maxStream uint32, ua string, e env) (s *grpc.Server, cc *grpc.ClientConn) { + sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(maxStream)} + la := ":0" + switch e.network { + case "unix": + la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now()) + syscall.Unlink(la) + } + lis, err := net.Listen(e.network, la) + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + if e.security == "tls" { + creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key") + if err != nil { + t.Fatalf("Failed to generate credentials %v", err) + } + sopts = append(sopts, grpc.Creds(creds)) + } + s = grpc.NewServer(sopts...) + if hs != nil { + healthpb.RegisterHealthCheckServer(s, hs) + } + testpb.RegisterTestServiceServer(s, &testServer{security: e.security}) + go s.Serve(lis) + addr := la + switch e.network { + case "unix": + default: + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + t.Fatalf("Failed to parse listener address: %v", err) + } + addr = "localhost:" + port + } + if e.security == "tls" { + creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + cc, err = grpc.Dial(addr, grpc.WithTransportCredentials(creds), grpc.WithDialer(e.dialer), grpc.WithUserAgent(ua)) + } else { + cc, err = grpc.Dial(addr, grpc.WithDialer(e.dialer), grpc.WithInsecure(), grpc.WithUserAgent(ua)) + } + if err != nil { + t.Fatalf("Dial(%q) = %v", addr, err) + } + return +} + +func tearDown(s *grpc.Server, cc *grpc.ClientConn) { + cc.Close() + s.Stop() +} + +func TestTimeoutOnDeadServer(t *testing.T) { + for _, e := range listTestEnv() { + testTimeoutOnDeadServer(t, e) + } +} + +func testTimeoutOnDeadServer(t *testing.T, e env) { + s, cc := setUp(t, nil, math.MaxUint32, "", e) + tc := testpb.NewTestServiceClient(cc) + if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok { + t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok) + } + if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok { + t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok) + } + if cc.State() != grpc.Ready { + t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready) + } + if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok { + t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok) + } + s.Stop() + // Set -1 as the timeout to make sure if transportMonitor gets error + // notification in time the failure path of the 1st invoke of + // ClientConn.wait hits the deadline exceeded error. 
+ ctx, _ := context.WithTimeout(context.Background(), -1)
+ if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
+ t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
+ }
+ if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
+ t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
+ }
+ state := cc.State()
+ if state != grpc.Connecting && state != grpc.TransientFailure {
+ t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
+ }
+ cc.Close()
+}
+
+func healthCheck(t time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) {
+ ctx, _ := context.WithTimeout(context.Background(), t)
+ hc := healthpb.NewHealthCheckClient(cc)
+ req := &healthpb.HealthCheckRequest{
+ Service: serviceName,
+ }
+ return hc.Check(ctx, req)
+}
+
+func TestHealthCheckOnSuccess(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testHealthCheckOnSuccess(t, e)
+ }
+}
+
+func testHealthCheckOnSuccess(t *testing.T, e env) {
+ hs := health.NewHealthServer()
+ hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", 1)
+ s, cc := setUp(t, hs, math.MaxUint32, "", e)
+ defer tearDown(s, cc)
+ if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck"); err != nil {
+ t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, <nil>", err)
+ }
+}
+
+func TestHealthCheckOnFailure(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testHealthCheckOnFailure(t, e)
+ }
+}
+
+func testHealthCheckOnFailure(t *testing.T, e env) {
+ hs := health.NewHealthServer()
+ hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", 1)
+ s, cc := setUp(t, hs, math.MaxUint32, "", e)
+ defer tearDown(s, cc)
+ if _, err := healthCheck(0*time.Second, cc, "grpc.health.v1alpha.HealthCheck"); err != grpc.Errorf(codes.DeadlineExceeded, "context deadline exceeded") {
+ t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, error code %d", err, codes.DeadlineExceeded)
+ }
+}
+
+func TestHealthCheckOff(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testHealthCheckOff(t, e)
+ }
+}
+
+func testHealthCheckOff(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ defer tearDown(s, cc)
+ if _, err := healthCheck(1*time.Second, cc, ""); err != grpc.Errorf(codes.Unimplemented, "unknown service grpc.health.v1alpha.HealthCheck") {
+ t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, error code %d", err, codes.Unimplemented)
+ }
+}
+
+func TestHealthCheckServingStatus(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testHealthCheckServingStatus(t, e)
+ }
+}
+
+func testHealthCheckServingStatus(t *testing.T, e env) {
+ hs := health.NewHealthServer()
+ s, cc := setUp(t, hs, math.MaxUint32, "", e)
+ defer tearDown(s, cc)
+ out, err := healthCheck(1*time.Second, cc, "")
+ if err != nil {
+ t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, <nil>", err)
+ }
+ if out.Status != healthpb.HealthCheckResponse_SERVING {
+ t.Fatalf("Got the serving status %v, want SERVING", out.Status)
+ }
+ if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck"); err != grpc.Errorf(codes.NotFound, "unknown service") {
+ t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, error code %d", err, codes.NotFound)
+ }
+ hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", healthpb.HealthCheckResponse_SERVING)
+ out, err = healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck")
+ if err != nil {
+ t.Fatalf("HealthCheck/Check(_, _) = _, %v, 
want _, ", err) + } + if out.Status != healthpb.HealthCheckResponse_SERVING { + t.Fatalf("Got the serving status %v, want SERVING", out.Status) + } + hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", healthpb.HealthCheckResponse_NOT_SERVING) + out, err = healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck") + if err != nil { + t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, ", err) + } + if out.Status != healthpb.HealthCheckResponse_NOT_SERVING { + t.Fatalf("Got the serving status %v, want NOT_SERVING", out.Status) + } + +} + +func TestEmptyUnaryWithUserAgent(t *testing.T) { + for _, e := range listTestEnv() { + testEmptyUnaryWithUserAgent(t, e) + } +} + +func testEmptyUnaryWithUserAgent(t *testing.T, e env) { + s, cc := setUp(t, nil, math.MaxUint32, testAppUA, e) + // Wait until cc is connected. + if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok { + t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok) + } + if ok := cc.WaitForStateChange(10*time.Second, grpc.Connecting); !ok { + t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok) + } + if cc.State() != grpc.Ready { + t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready) + } + if ok := cc.WaitForStateChange(time.Second, grpc.Ready); ok { + t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok) + } + tc := testpb.NewTestServiceClient(cc) + var header metadata.MD + reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header)) + if err != nil || !proto.Equal(&testpb.Empty{}, reply) { + t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, ", reply, err, &testpb.Empty{}) + } + if v, ok := header["ua"]; !ok || v[0] != testAppUA { + t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA) + } + tearDown(s, cc) + if ok := cc.WaitForStateChange(5*time.Second, grpc.Ready); !ok { + t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok) + } + if cc.State() != grpc.Shutdown { + t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Shutdown) + } +} + +func TestFailedEmptyUnary(t *testing.T) { + for _, e := range listTestEnv() { + testFailedEmptyUnary(t, e) + } +} + +func testFailedEmptyUnary(t *testing.T, e env) { + s, cc := setUp(t, nil, math.MaxUint32, "", e) + tc := testpb.NewTestServiceClient(cc) + defer tearDown(s, cc) + ctx := metadata.NewContext(context.Background(), testMetadata) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata")) + } +} + +func TestLargeUnary(t *testing.T) { + for _, e := range listTestEnv() { + testLargeUnary(t, e) + } +} + +func testLargeUnary(t *testing.T, e env) { + s, cc := setUp(t, nil, math.MaxUint32, "", e) + tc := testpb.NewTestServiceClient(cc) + defer tearDown(s, cc) + argSize := 271828 + respSize := 314159 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(respSize)), + Payload: payload, + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + pt := reply.GetPayload().GetType() + ps := len(reply.GetPayload().GetBody()) + if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize { + t.Fatalf("Got the reply 
with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
+ }
+}
+
+func TestMetadataUnaryRPC(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testMetadataUnaryRPC(t, e)
+ }
+}
+
+func testMetadataUnaryRPC(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ argSize := 2718
+ respSize := 314
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.SimpleRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseSize: proto.Int32(int32(respSize)),
+ Payload: payload,
+ }
+ var header, trailer metadata.MD
+ ctx := metadata.NewContext(context.Background(), testMetadata)
+ if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
+ t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
+ }
+ if !reflect.DeepEqual(testMetadata, header) {
+ t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
+ }
+ if !reflect.DeepEqual(testMetadata, trailer) {
+ t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata)
+ }
+}
+
+func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) {
+ argSize := 2718
+ respSize := 314
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.SimpleRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseSize: proto.Int32(int32(respSize)),
+ Payload: payload,
+ }
+ reply, err := tc.UnaryCall(context.Background(), req)
+ if err != nil {
+ t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
+ }
+ pt := reply.GetPayload().GetType()
+ ps := len(reply.GetPayload().GetBody())
+ if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
+ t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
+ }
+ wg.Done()
+}
+
+func TestRetry(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testRetry(t, e)
+ }
+}
+
+// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport.
+// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy
+// and error-prone paths.
+func testRetry(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ time.Sleep(1 * time.Second)
+ // The server shuts down the network connection to make a
+ // transport error which will be detected by the client side
+ // code.
+ s.TestingCloseConns()
+ wg.Done()
+ }()
+ // All these RPCs should succeed eventually.
+ for i := 0; i < 1000; i++ {
+ time.Sleep(2 * time.Millisecond)
+ wg.Add(1)
+ go performOneRPC(t, tc, &wg)
+ }
+ wg.Wait()
+}
+
+func TestRPCTimeout(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testRPCTimeout(t, e)
+ }
+}
+
+// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism.
+func testRPCTimeout(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ argSize := 2718
+ respSize := 314
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.SimpleRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseSize: proto.Int32(int32(respSize)),
+ Payload: payload,
+ }
+ for i := -1; i <= 10; i++ {
+ ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
+ reply, err := tc.UnaryCall(ctx, req)
+ if grpc.Code(err) != codes.DeadlineExceeded {
+ t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
+ }
+ }
+}
+
+func TestCancel(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testCancel(t, e)
+ }
+}
+
+func testCancel(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ argSize := 2718
+ respSize := 314
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.SimpleRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseSize: proto.Int32(int32(respSize)),
+ Payload: payload,
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ time.AfterFunc(1*time.Millisecond, cancel)
+ reply, err := tc.UnaryCall(ctx, req)
+ if grpc.Code(err) != codes.Canceled {
+ t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
+ }
+}
+
+// The following tests the gRPC streaming RPC implementations.
+// TODO(zhaoq): Have better coverage on error cases.
+var (
+ reqSizes = []int{27182, 8, 1828, 45904}
+ respSizes = []int{31415, 9, 2653, 58979}
+)
+
+func TestPingPong(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testPingPong(t, e)
+ }
+}
+
+func testPingPong(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ stream, err := tc.FullDuplexCall(context.Background())
+ if err != nil {
+ t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
+ }
+ var index int
+ for index < len(reqSizes) {
+ respParam := []*testpb.ResponseParameters{
+ {
+ Size: proto.Int32(int32(respSizes[index])),
+ },
+ }
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ Payload: payload,
+ }
+ if err := stream.Send(req); err != nil {
+ t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
+ }
+ reply, err := stream.Recv()
+ if err != nil {
+ t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
+ }
+ pt := reply.GetPayload().GetType()
+ if pt != testpb.PayloadType_COMPRESSABLE {
+ t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
+ }
+ size := len(reply.GetPayload().GetBody())
+ if size != int(respSizes[index]) {
+ t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
+ }
+ index++
+ }
+ if err := stream.CloseSend(); err != nil {
+ t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
+ }
+ if _, err := stream.Recv(); err != io.EOF {
+ t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
+ }
+}
+
+func TestMetadataStreamingRPC(t *testing.T) {
+ for _, e := range 
listTestEnv() {
+ testMetadataStreamingRPC(t, e)
+ }
+}
+
+func testMetadataStreamingRPC(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ ctx := metadata.NewContext(context.Background(), testMetadata)
+ stream, err := tc.FullDuplexCall(ctx)
+ if err != nil {
+ t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
+ }
+ go func() {
+ headerMD, err := stream.Header()
+ if e.security == "tls" {
+ delete(headerMD, "transport_security_type")
+ }
+ if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
+ t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
+ }
+ // test the cached value.
+ headerMD, err = stream.Header()
+ if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
+ t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
+ }
+ var index int
+ for index < len(reqSizes) {
+ respParam := []*testpb.ResponseParameters{
+ {
+ Size: proto.Int32(int32(respSizes[index])),
+ },
+ }
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ Payload: payload,
+ }
+ if err := stream.Send(req); err != nil {
+ t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
+ return
+ }
+ index++
+ }
+ // Tell the server we're done sending args.
+ stream.CloseSend()
+ }()
+ for {
+ if _, err := stream.Recv(); err != nil {
+ break
+ }
+ }
+ trailerMD := stream.Trailer()
+ if !reflect.DeepEqual(testMetadata, trailerMD) {
+ t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata)
+ }
+}
+
+func TestServerStreaming(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testServerStreaming(t, e)
+ }
+}
+
+func testServerStreaming(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ respParam := make([]*testpb.ResponseParameters, len(respSizes))
+ for i, s := range respSizes {
+ respParam[i] = &testpb.ResponseParameters{
+ Size: proto.Int32(int32(s)),
+ }
+ }
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ }
+ stream, err := tc.StreamingOutputCall(context.Background(), req)
+ if err != nil {
+ t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
+ }
+ var rpcStatus error
+ var respCnt int
+ var index int
+ for {
+ reply, err := stream.Recv()
+ if err != nil {
+ rpcStatus = err
+ break
+ }
+ pt := reply.GetPayload().GetType()
+ if pt != testpb.PayloadType_COMPRESSABLE {
+ t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
+ }
+ size := len(reply.GetPayload().GetBody())
+ if size != int(respSizes[index]) {
+ t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
+ }
+ index++
+ respCnt++
+ }
+ if rpcStatus != io.EOF {
+ t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
+ }
+ if respCnt != len(respSizes) {
+ t.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
+ }
+}
+
+func TestFailedServerStreaming(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testFailedServerStreaming(t, e)
+ }
+}
+
+func testFailedServerStreaming(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ respParam := 
make([]*testpb.ResponseParameters, len(respSizes))
+ for i, s := range respSizes {
+ respParam[i] = &testpb.ResponseParameters{
+ Size: proto.Int32(int32(s)),
+ }
+ }
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ }
+ ctx := metadata.NewContext(context.Background(), testMetadata)
+ stream, err := tc.StreamingOutputCall(ctx, req)
+ if err != nil {
+ t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
+ }
+ if _, err := stream.Recv(); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
+ t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
+ }
+}
+
+func TestClientStreaming(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testClientStreaming(t, e)
+ }
+}
+
+func testClientStreaming(t *testing.T, e env) {
+ s, cc := setUp(t, nil, math.MaxUint32, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ stream, err := tc.StreamingInputCall(context.Background())
+ if err != nil {
+ t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
+ }
+ var sum int
+
+ for _, s := range reqSizes {
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.StreamingInputCallRequest{
+ Payload: payload,
+ }
+ if err := stream.Send(req); err != nil {
+ t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
+ }
+ sum += s
+ }
+ reply, err := stream.CloseAndRecv()
+ if err != nil {
+ t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
+ }
+ if reply.GetAggregatedPayloadSize() != int32(sum) {
+ t.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
+ }
+}
+
+func TestExceedMaxStreamsLimit(t *testing.T) {
+ for _, e := range listTestEnv() {
+ testExceedMaxStreamsLimit(t, e)
+ }
+}
+
+func testExceedMaxStreamsLimit(t *testing.T, e env) {
+ // Only allows 1 live stream per server transport.
+ s, cc := setUp(t, nil, 1, "", e)
+ tc := testpb.NewTestServiceClient(cc)
+ defer tearDown(s, cc)
+ done := make(chan struct{})
+ ch := make(chan int)
+ go func() {
+ for {
+ select {
+ case <-time.After(5 * time.Millisecond):
+ ch <- 0
+ case <-time.After(5 * time.Second):
+ close(done)
+ return
+ }
+ }
+ }()
+ // Loop until a stream creation hangs due to the new max stream setting.
+ for {
+ select {
+ case <-ch:
+ ctx, _ := context.WithTimeout(context.Background(), time.Second)
+ if _, err := tc.StreamingInputCall(ctx); err != nil {
+ if grpc.Code(err) == codes.DeadlineExceeded {
+ return
+ }
+ t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
+ }
+ case <-done:
+ t.Fatalf("Client has not received the max stream setting in 5 seconds.")
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go
new file mode 100644
index 000000000..b25e98b8e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go
@@ -0,0 +1,702 @@
+// Code generated by protoc-gen-go.
+// source: test.proto
+// DO NOT EDIT!
+
+/*
+Package grpc_testing is a generated protocol buffer package. 
+ +It is generated from these files: + test.proto + +It has these top-level messages: + Empty + Payload + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. + PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) Enum() *PayloadType { + p := new(PayloadType) + *p = x + return p +} +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (x *PayloadType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") + if err != nil { + return err + } + *x = PayloadType(value) + return nil +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. + Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. + Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} + +func (m *Payload) GetType() PayloadType { + if m != nil && m.Type != nil { + return *m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername *bool `protobuf:"varint,4,opt,name=fill_username" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. 
+ FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope" json:"fill_oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} + +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil && m.FillUsername != nil { + return *m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil && m.FillOauthScope != nil { + return *m.FillOauthScope + } + return false +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. + OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope" json:"oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleResponse) GetUsername() string { + if m != nil && m.Username != nil { + return *m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil && m.OauthScope != nil { + return *m.OauthScope + } + return "" +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. + AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size" json:"aggregated_payload_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} + +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil && m.AggregatedPayloadSize != nil { + return *m.AggregatedPayloadSize + } + return 0 +} + +// Configuration for a particular response. 
+type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us" json:"interval_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} + +func (m *ResponseParameters) GetSize() int32 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil && m.IntervalUs != nil { + return *m.IntervalUs + } + return 0 +} + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} + +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. +type StreamingOutputCallResponse struct { + // Payload to increase response size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) +} + +// Client API for TestService service + +type TestServiceClient interface { + // One empty request followed by one empty response. + EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. + HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingOutputCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_StreamingOutputCallClient interface { + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingOutputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceStreamingInputCallClient{stream} + return x, nil +} + +type TestService_StreamingInputCallClient interface { + Send(*StreamingInputCallRequest) error + CloseAndRecv() (*StreamingInputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingInputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamingInputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceFullDuplexCallClient{stream} + return x, nil +} + +type TestService_FullDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceFullDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceHalfDuplexCallClient{stream} + return x, nil +} + +type TestService_HalfDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceHalfDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One empty request followed by one empty response. + EmptyCall(context.Context, *Empty) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. 
+ StreamingInputCall(TestService_StreamingInputCallServer) error + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(TestService_FullDuplexCallServer) error + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. + HalfDuplexCall(TestService_HalfDuplexCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(Empty) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(TestServiceServer).EmptyCall(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { + in := new(SimpleRequest) + if err := codec.Unmarshal(buf, in); err != nil { + return nil, err + } + out, err := srv.(TestServiceServer).UnaryCall(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingOutputCallRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +} + +type TestService_StreamingOutputCallServer interface { + Send(*StreamingOutputCallResponse) error + grpc.ServerStream +} + +type testServiceStreamingOutputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) +} + +type TestService_StreamingInputCallServer interface { + SendAndClose(*StreamingInputCallResponse) error + Recv() (*StreamingInputCallRequest, error) + grpc.ServerStream +} + +type testServiceStreamingInputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { + m := new(StreamingInputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) +} + +type TestService_FullDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceFullDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := 
new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) +} + +type TestService_HalfDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceHalfDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EmptyCall", + Handler: _TestService_EmptyCall_Handler, + }, + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingOutputCall", + Handler: _TestService_StreamingOutputCall_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingInputCall", + Handler: _TestService_StreamingInputCall_Handler, + ClientStreams: true, + }, + { + StreamName: "FullDuplexCall", + Handler: _TestService_FullDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "HalfDuplexCall", + Handler: _TestService_HalfDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.proto b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.proto new file mode 100644 index 000000000..b5bfe0537 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.proto @@ -0,0 +1,140 @@ +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto2"; + +package grpc.testing; + +message Empty {} + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + optional PayloadType type = 1; + // Primary contents of payload. + optional bytes body = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + optional PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 response_size = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; + + // Whether SimpleResponse should include username. + optional bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. + optional bool fill_oauth_scope = 5; +} + +// Unary response, as configured by the request. 
+message SimpleResponse { + // Payload to increase message size. + optional Payload payload = 1; + + // The user the request came from, for verifying authentication was + // successful when the client expected it. + optional string username = 2; + + // OAuth scope. + optional string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + optional Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + optional int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + optional int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + optional PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + optional Payload payload = 1; +} + +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. +service TestService { + // One empty request followed by one empty response. + rpc EmptyCall(Empty) returns (Empty); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + rpc StreamingOutputCall(StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + rpc StreamingInputCall(stream StreamingInputCallRequest) + returns (StreamingInputCallResponse); + + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + rpc FullDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ rpc HalfDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/ca.pem new file mode 100644 index 000000000..999da8977 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/ca.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw +NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh +MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 +Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 +cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv +xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ +QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ +AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz +fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y +5ukK//dCPAzA11YuX2rnex0JhuTQfcI= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.key new file mode 100644 index 000000000..ed00cd500 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ +bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM +BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB +AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I +ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg +omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb +ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB +9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd +MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v +IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc +USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo +VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a +RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.pem new file mode 100644 index 000000000..8e582e571 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 +MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl +c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs +JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO +RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 +3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL +BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 +b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ 
+KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS +wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e +aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/trace.go b/Godeps/_workspace/src/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..246357406 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/trace.go @@ -0,0 +1,116 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing = true + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. 
+type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return fmt.Sprintf("sent: %v", p.msg) + } else { + return fmt.Sprintf("recv: %v", p.msg) + } +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go new file mode 100644 index 000000000..464bf10a3 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go @@ -0,0 +1,259 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "fmt" + "sync" + + "github.com/bradfitz/http2" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + initialConnWindowSize = defaultWindowSize * 16 // for a connection +) + +// The following defines various control items which could flow through +// the control buffer of transport. 
They represent different aspects of
+// control tasks, e.g., flow control, settings, stream resetting, etc.
+type windowUpdate struct {
+ streamID uint32
+ increment uint32
+}
+
+func (windowUpdate) isItem() bool {
+ return true
+}
+
+type settings struct {
+ ack bool
+ ss []http2.Setting
+}
+
+func (settings) isItem() bool {
+ return true
+}
+
+type resetStream struct {
+ streamID uint32
+ code http2.ErrCode
+}
+
+func (resetStream) isItem() bool {
+ return true
+}
+
+type flushIO struct {
+}
+
+func (flushIO) isItem() bool {
+ return true
+}
+
+type ping struct {
+ ack bool
+}
+
+func (ping) isItem() bool {
+ return true
+}
+
+// quotaPool accumulates quota and makes it available on the channel
+// returned by acquire().
+type quotaPool struct {
+ c chan int
+
+ mu sync.Mutex
+ quota int
+}
+
+// newQuotaPool creates a quotaPool which has quota q available to consume.
+func newQuotaPool(q int) *quotaPool {
+ qb := &quotaPool{
+ c: make(chan int, 1),
+ }
+ if q > 0 {
+ qb.c <- q
+ } else {
+ qb.quota = q
+ }
+ return qb
+}
+
+// add adds n to the available quota and tries to send it on acquire.
+func (qb *quotaPool) add(n int) {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ qb.quota += n
+ if qb.quota <= 0 {
+ return
+ }
+ select {
+ case qb.c <- qb.quota:
+ qb.quota = 0
+ default:
+ }
+}
+
+// cancel cancels the pending quota sent on acquire, if any.
+func (qb *quotaPool) cancel() {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ select {
+ case n := <-qb.c:
+ qb.quota += n
+ default:
+ }
+}
+
+// reset cancels the pending quota sent on acquire, increments it by v, and
+// sends it back on acquire.
+func (qb *quotaPool) reset(v int) {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ select {
+ case n := <-qb.c:
+ qb.quota += n
+ default:
+ }
+ qb.quota += v
+ if qb.quota <= 0 {
+ return
+ }
+ select {
+ case qb.c <- qb.quota:
+ qb.quota = 0
+ default:
+ }
+}
+
+// acquire returns the channel on which available quota amounts are sent.
+func (qb *quotaPool) acquire() <-chan int {
+ return qb.c
+}
+
+// inFlow deals with inbound flow control.
+type inFlow struct {
+ // The inbound flow control limit for pending data.
+ limit uint32
+ // conn points to the shared connection-level inFlow that is shared
+ // by all streams on that conn. It is nil for the inFlow on the conn
+ // directly.
+ conn *inFlow
+
+ mu sync.Mutex
+ // pendingData is the amount of data that has been received but not yet
+ // consumed by the application.
+ pendingData uint32
+ // pendingUpdate is the amount of data the application has consumed for
+ // which grpc has not yet sent a window update. Used to reduce window
+ // update frequency.
+ pendingUpdate uint32
+}
+
+// onData is invoked when some data frame is received. It increments not only its
+// own pendingData but also that of the associated connection-level flow.
+func (f *inFlow) onData(n uint32) error {
+ if n == 0 {
+ return nil
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.pendingData+f.pendingUpdate+n > f.limit {
+ return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit)
+ }
+ if f.conn != nil {
+ if err := f.conn.onData(n); err != nil {
+ return ConnectionErrorf("%v", err)
+ }
+ }
+ f.pendingData += n
+ return nil
+}
+
+// connOnRead updates the connection-level state when the application consumes data.
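+// It returns a non-zero connection window update once the unacknowledged
+// amount reaches a quarter of the connection limit, and 0 when called on a
+// stream-level inFlow (f.conn != nil) or when there is nothing to report.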
+func (f *inFlow) connOnRead(n uint32) uint32 { + if n == 0 || f.conn != nil { + return 0 + } + f.mu.Lock() + defer f.mu.Unlock() + f.pendingData -= n + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + ret := f.pendingUpdate + f.pendingUpdate = 0 + return ret + } + return 0 +} + +// onRead is invoked when the application reads the data. It returns the window updates +// for both stream and connection level. +func (f *inFlow) onRead(n uint32) (swu, cwu uint32) { + if n == 0 { + return + } + f.mu.Lock() + defer f.mu.Unlock() + if f.pendingData == 0 { + // pendingData has been adjusted by restoreConn. + return + } + f.pendingData -= n + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + swu = f.pendingUpdate + f.pendingUpdate = 0 + } + cwu = f.conn.connOnRead(n) + return +} + +// restoreConn is invoked when a stream is terminated. It removes its stake in +// the connection-level flow and resets its own state. +func (f *inFlow) restoreConn() uint32 { + if f.conn == nil { + return 0 + } + f.mu.Lock() + defer f.mu.Unlock() + n := f.pendingData + f.pendingData = 0 + f.pendingUpdate = 0 + return f.conn.connOnRead(n) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go new file mode 100644 index 000000000..40b764082 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go @@ -0,0 +1,849 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "bytes" + "errors" + "io" + "math" + "net" + "strings" + "sync" + "time" + + "github.com/bradfitz/http2" + "github.com/bradfitz/http2/hpack" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" +) + +// http2Client implements the ClientTransport interface with HTTP2. 
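+// All writes to the connection are serialized through writableChan, frames
+// are read by a dedicated reader goroutine, and control frames are emitted
+// by the controller goroutine (see below).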
+type http2Client struct { + target string // server name/addr + userAgent string + conn net.Conn // underlying communication channel + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used + + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by sending a value on writableChan + // and releases it by receiving from writableChan. + writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + // TODO(zhaoq): Maybe have a channel context? + shutdownChan chan struct{} + // errorChan is closed to notify the I/O error to the caller. + errorChan chan struct{} + + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *recvBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + // streamsQuota limits the max number of concurrent streams. + streamsQuota *quotaPool + + // The scheme used: https if TLS is on, http otherwise. + scheme string + + authCreds []credentials.Credentials + + mu sync.Mutex // guard the following variables + state transportState // the state of underlying connection + activeStreams map[uint32]*Stream + // The max number of concurrent streams + maxStreams int + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) { + if opts.Dialer == nil { + // Set the default Dialer. + opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", addr, timeout) + } + } + scheme := "http" + startT := time.Now() + timeout := opts.Timeout + conn, connErr := opts.Dialer(addr, timeout) + if connErr != nil { + return nil, ConnectionErrorf("transport: %v", connErr) + } + var authInfo credentials.AuthInfo + for _, c := range opts.AuthOptions { + if ccreds, ok := c.(credentials.TransportAuthenticator); ok { + scheme = "https" + // TODO(zhaoq): Now the first TransportAuthenticator is used if there are + // multiple ones provided. Revisit this if it is not appropriate. Probably + // place the ClientTransport construction into a separate function to make + // things clear. + if timeout > 0 { + timeout -= time.Since(startT) + } + conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) + break + } + } + if connErr != nil { + return nil, ConnectionErrorf("transport: %v", connErr) + } + defer func() { + if err != nil { + conn.Close() + } + }() + // Send connection preface to server. 
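+ // The preface is the fixed byte sequence "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+ // required by RFC 7540 before any frames may be exchanged.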
+ n, err := conn.Write(clientPreface) + if err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + if n != len(clientPreface) { + return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + framer := newFramer(conn) + if initialWindowSize != defaultWindowSize { + err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + } else { + err = framer.writeSettings(true) + } + if err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := framer.writeWindowUpdate(true, 0, delta); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + } + ua := primaryUA + if opts.UserAgent != "" { + ua = opts.UserAgent + " " + ua + } + var buf bytes.Buffer + t := &http2Client{ + target: addr, + userAgent: ua, + conn: conn, + authInfo: authInfo, + // The client initiated stream id is odd starting from 1. + nextID: 1, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + errorChan: make(chan struct{}), + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + authCreds: opts.AuthOptions, + maxStreams: math.MaxInt32, + streamSendQuota: defaultWindowSize, + } + go t.controller() + t.writableChan <- 0 + // Start the reader goroutine for incoming message. The threading model + // on receiving is that each transport has a dedicated goroutine which + // reads HTTP2 frame from network. Then it dispatches the frame to the + // corresponding stream entity. + go t.reader() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + fc := &inFlow{ + limit: initialWindowSize, + conn: t.fc, + } + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + id: t.nextID, + method: callHdr.Method, + buf: newRecvBuffer(), + fc: fc, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + headerChan: make(chan struct{}), + } + t.nextID += 2 + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) + } + // Make a stream be able to cancel the pending operations by itself. + s.ctx, s.cancel = context.WithCancel(ctx) + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + } + return s +} + +// NewStream creates a stream and register it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + // Record the timeout value on the context. + var timeout time.Duration + if dl, ok := ctx.Deadline(); ok { + timeout = dl.Sub(time.Now()) + if timeout <= 0 { + return nil, ContextErr(context.DeadlineExceeded) + } + } + // Attach Auth info if there is any. + if t.authInfo != nil { + ctx = credentials.NewContext(ctx, t.authInfo) + } + authData := make(map[string]string) + for _, c := range t.authCreds { + // Construct URI required to get auth request metadata. + var port string + if pos := strings.LastIndex(t.target, ":"); pos != -1 { + // Omit port if it is the default one. 
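+ // For example, a callHdr.Host of "example.com", a target port of ":8443",
+ // and a method of "/pkg.Service/GetFoo" yield the audience
+ // "https://example.com:8443/pkg.Service".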
+ if t.target[pos+1:] != "443" { + port = ":" + t.target[pos+1:] + } + } + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + } + audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) + } + for k, v := range data { + authData[k] = v + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + checkStreamsQuota := t.streamsQuota != nil + t.mu.Unlock() + if checkStreamsQuota { + sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) + } + } + if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { + // t.streamsQuota will be updated when t.CloseStream is invoked. + return nil, err + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + t.mu.Unlock() + // HPACK encodes various headers. Note that once WriteField(...) is + // called, the corresponding headers/continuation frame has to be sent + // because hpack.Encoder is stateful. + t.hBuf.Reset() + t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + + if timeout > 0 { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) + } + for k, v := range authData { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + } + var ( + hasMD bool + endHeaders bool + ) + if md, ok := metadata.FromContext(ctx); ok { + hasMD = true + for k, v := range md { + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } + } + } + first := true + // Sends the headers in a single batch even when they span multiple frames. + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + // Sends a HeadersFrame to server to start a new stream. + p := http2.HeadersFrameParam{ + StreamID: s.id, + BlockFragment: t.hBuf.Next(size), + EndStream: false, + EndHeaders: endHeaders, + } + // Do a force flush for the buffered frames iff it is the last headers frame + // and there is header metadata to be sent. Otherwise, there is flushing until + // the corresponding data frame is written. + err = t.framer.writeHeaders(hasMD && endHeaders, p) + first = false + } else { + // Sends Continuation frames for the leftover headers. + err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size)) + } + if err != nil { + t.notifyError(err) + return nil, ConnectionErrorf("transport: %v", err) + } + } + t.writableChan <- 0 + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. 
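+// It releases the stream's slot in streamsQuota (when one is set) and returns
+// any unconsumed connection-level flow control to the peer.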
+// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var updateStreams bool + t.mu.Lock() + if t.streamsQuota != nil { + updateStreams = true + } + delete(t.activeStreams, s.id) + t.mu.Unlock() + if updateStreams { + t.streamsQuota.add(1) + } + s.mu.Lock() + if q := s.fc.restoreConn(); q > 0 { + t.controlBuf.put(&windowUpdate{0, q}) + } + if s.state == streamDone { + s.mu.Unlock() + return + } + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.state = streamDone + s.mu.Unlock() + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), the caller needs + // to call cancel on the stream to interrupt the blocking on + // other goroutines. + s.cancel() + if _, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close() (err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + t.mu.Unlock() + close(t.shutdownChan) + err = t.conn.Close() + t.mu.Lock() + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + // Notify all active streams. + for _, s := range streams { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: ErrConnClosing}) + } + return +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later +// if it improves the performance. +func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { + r := bytes.NewBuffer(data) + for { + var p []byte + if r.Len() > 0 { + size := http2MaxFrameLen + s.sendQuotaPool.add(0) + // Wait until the stream has some quota to send the data. + sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + if err != nil { + return err + } + t.sendQuotaPool.add(0) + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + if err != nil { + if _, ok := err.(StreamError); ok { + t.sendQuotaPool.cancel() + } + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + p = r.Next(size) + ps := len(p) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.add(sq - ps) + } + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + } + var ( + endStream bool + forceFlush bool + ) + if opts.Last && r.Len() == 0 { + endStream = true + } + // Indicate there is a writer who is about to write a data frame. + t.framer.adjustNumWriters(1) + // Got some quota. Try to acquire writing privilege on the transport. + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if t.framer.adjustNumWriters(-1) == 0 { + // This writer is the last one in this batch and has the + // responsibility to flush the buffered frames. It queues + // a flush request to controlBuf instead of flushing directly + // in order to avoid the race with other writing or flushing. 
+ t.controlBuf.put(&flushIO{}) + } + return err + } + if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { + // Do a force flush iff this is last frame for the entire gRPC message + // and the caller is the only writer at this moment. + forceFlush = true + } + // If WriteData fails, all the pending streams will be handled + // by http2Client.Close(). No explicit CloseStream() needs to be + // invoked. + if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { + t.notifyError(err) + return ConnectionErrorf("transport: %v", err) + } + if t.framer.adjustNumWriters(-1) == 0 { + t.framer.flushWrite() + } + t.writableChan <- 0 + if r.Len() == 0 { + break + } + } + if !opts.Last { + return nil + } + s.mu.Lock() + if s.state != streamDone { + if s.state == streamReadDone { + s.state = streamDone + } else { + s.state = streamWriteDone + } + } + s.mu.Unlock() + return nil +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + if s, ok := t.activeStreams[f.Header().StreamID]; ok { + return s, true + } + return nil, false +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + swu, cwu := s.fc.onRead(n) + if swu > 0 { + t.controlBuf.put(&windowUpdate{s.id, swu}) + } + if cwu > 0 { + t.controlBuf.put(&windowUpdate{0, cwu}) + } +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + size := len(f.Data()) + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + t.notifyError(err) + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.statusCode = codes.Internal + s.statusDesc = err.Error() + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. 
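+ // The gRPC protocol requires the server to finish every RPC with trailers,
+ // so an END_STREAM on a DATA frame is reported as codes.Internal.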
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + s.mu.Lock() + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone + } + s.statusCode = codes.Internal + s.statusDesc = "server closed the stream without sending trailers" + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] + if !ok { + grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + } + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + t.controlBuf.put(&ping{true}) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + // TODO(zhaoq): GoAwayFrame handler to be implemented +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + id := f.Header().StreamID + incr := f.Increment + if id == 0 { + t.sendQuotaPool.add(int(incr)) + return + } + if s, ok := t.getStream(f); ok { + s.sendQuotaPool.add(int(incr)) + } +} + +// operateHeader takes action on the decoded headers. It returns the current +// stream if there are remaining headers on the wire (in the following +// Continuation frame). +func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) { + defer func() { + if pendingStream == nil { + hDec.state = decodeState{} + } + }() + endHeaders, err := hDec.decodeClientHTTP2Headers(frame) + if s == nil { + // s has been closed. + return nil + } + if err != nil { + s.write(recvMsg{err: err}) + // Something wrong. Stops reading even when there is remaining. + return nil + } + if !endHeaders { + return s + } + + s.mu.Lock() + if !s.headerDone { + if !endStream && len(hDec.state.mdata) > 0 { + s.header = hDec.state.mdata + } + close(s.headerChan) + s.headerDone = true + } + if !endStream || s.state == streamDone { + s.mu.Unlock() + return nil + } + + if len(hDec.state.mdata) > 0 { + s.trailer = hDec.state.mdata + } + s.state = streamDone + s.statusCode = hDec.state.statusCode + s.statusDesc = hDec.state.statusDesc + s.mu.Unlock() + + s.write(recvMsg{err: io.EOF}) + return nil +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + // Check the validity of server preface. + frame, err := t.framer.readFrame() + if err != nil { + t.notifyError(err) + return + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.notifyError(err) + return + } + t.handleSettings(sf) + + hDec := newHPACKDecoder() + var curStream *Stream + // loop to keep reading incoming messages on this transport. 
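+ // A header block (HEADERS plus CONTINUATIONs) must arrive contiguously on
+ // the connection, which is why curStream is threaded between iterations.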
+ for { + frame, err := t.framer.readFrame() + if err != nil { + t.notifyError(err) + return + } + switch frame := frame.(type) { + case *http2.HeadersFrame: + // operateHeaders has to be invoked regardless the value of curStream + // because the HPACK decoder needs to be updated using the received + // headers. + curStream, _ = t.getStream(frame) + endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) + curStream = t.operateHeaders(hDec, curStream, frame, endStream) + case *http2.ContinuationFrame: + curStream = t.operateHeaders(hDec, curStream, frame, false) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +func (t *http2Client) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + // TODO(zhaoq): This is a hack to avoid significant refactoring of the + // code to deal with the unrealistic int32 overflow. Probably will try + // to find a better way to handle this later. + if s.Val > math.MaxInt32 { + s.Val = math.MaxInt32 + } + t.mu.Lock() + reset := t.streamsQuota != nil + if !reset { + t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) + } + ms := t.maxStreams + t.maxStreams = int(s.Val) + t.mu.Unlock() + if reset { + t.streamsQuota.reset(int(s.Val) - ms) + } + case http2.SettingInitialWindowSize: + t.mu.Lock() + for _, stream := range t.activeStreams { + // Adjust the sending quota for each stream. + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + } +} + +// controller running in a separate goroutine takes charge of sending control +// frames (e.g., window update, reset stream, setting, etc.) to the server. +func (t *http2Client) controller() { + for { + select { + case i := <-t.controlBuf.get(): + t.controlBuf.load() + select { + case <-t.writableChan: + switch i := i.(type) { + case *windowUpdate: + t.framer.writeWindowUpdate(true, i.streamID, i.increment) + case *settings: + if i.ack { + t.framer.writeSettingsAck(true) + t.applySettings(i.ss) + } else { + t.framer.writeSettings(true, i.ss...) + } + case *resetStream: + t.framer.writeRSTStream(true, i.streamID, i.code) + case *flushIO: + t.framer.flushWrite() + case *ping: + // TODO(zhaoq): Ack with all-0 data now. will change to some + // meaningful content when this is actually in use. + t.framer.writePing(true, i.ack, [8]byte{}) + default: + grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) + } + t.writableChan <- 0 + continue + case <-t.shutdownChan: + return + } + case <-t.shutdownChan: + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.errorChan +} + +func (t *http2Client) notifyError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + // make sure t.errorChan is closed only once. 
+ if t.state == reachable { + t.state = unreachable + close(t.errorChan) + grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go new file mode 100644 index 000000000..8856d7f4c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go @@ -0,0 +1,691 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "bytes" + "errors" + "io" + "math" + "net" + "strconv" + "sync" + + "github.com/bradfitz/http2" + "github.com/bradfitz/http2/hpack" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" +) + +// ErrIllegalHeaderWrite indicates that setting header is illegal because of +// the stream's state. +var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + conn net.Conn + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by sending a value on writableChan + // and releases it by receiving from writableChan. + writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + shutdownChan chan struct{} + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // The max number of concurrent streams. 
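+ // (math.MaxUint32 when the server imposes no limit; see newHTTP2Server.)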
+ maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *recvBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + + mu sync.Mutex // guard the following + state transportState + activeStreams map[uint32]*Stream + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { + framer := newFramer(conn) + // Send initial settings as connection preface to client. + var settings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) + } + if initialWindowSize != defaultWindowSize { + settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + } + if err := framer.writeSettings(true, settings...); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := framer.writeWindowUpdate(true, 0, delta); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + } + var buf bytes.Buffer + t := &http2Server{ + conn: conn, + authInfo: authInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + } + go t.controller() + t.writableChan <- 0 + return t, nil +} + +// operateHeader takes action on the decoded headers. It returns the current +// stream if there are remaining headers on the wire (in the following +// Continuation frame). +func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) { + defer func() { + if pendingStream == nil { + hDec.state = decodeState{} + } + }() + endHeaders, err := hDec.decodeServerHTTP2Headers(frame) + if s == nil { + // s has been closed. + return nil + } + if err != nil { + grpclog.Printf("transport: http2Server.operateHeader found %v", err) + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + } + return nil + } + if endStream { + // s is just created by the caller. No lock needed. 
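+ // END_STREAM on the HEADERS frame means the client has already half-closed,
+ // so the read direction of the stream is done as soon as it is created.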
+ s.state = streamReadDone + } + if !endHeaders { + return s + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + return nil + } + s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) + t.activeStreams[s.id] = s + t.mu.Unlock() + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) + } + if hDec.state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(context.TODO()) + } + // Attach Auth info if there is any. + if t.authInfo != nil { + s.ctx = credentials.NewContext(s.ctx, t.authInfo) + } + // Cache the current stream to the context so that the server application + // can find out. Required when the server wants to send some metadata + // back to the client (unary call only). + s.ctx = newContextWithStream(s.ctx, s) + // Attach the received metadata to the context. + if len(hDec.state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) + } + + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + } + s.method = hDec.state.method + + wg.Add(1) + go func() { + handle(s) + wg.Done() + }() + return nil +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +func (t *http2Server) HandleStreams(handle func(*Stream)) { + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + t.Close() + return + } + if !bytes.Equal(preface, clientPreface) { + grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + t.Close() + return + } + + frame, err := t.framer.readFrame() + if err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + t.Close() + return + } + t.handleSettings(sf) + + hDec := newHPACKDecoder() + var curStream *Stream + var wg sync.WaitGroup + defer wg.Wait() + for { + frame, err := t.framer.readFrame() + if err != nil { + t.Close() + return + } + switch frame := frame.(type) { + case *http2.HeadersFrame: + id := frame.Header().StreamID + if id%2 != 1 || id <= t.maxStreamID { + // illegal gRPC stream id. 
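+ // Client-initiated stream IDs must be odd and strictly increasing
+ // (RFC 7540, Section 5.1.1).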
+ grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) + t.Close() + break + } + t.maxStreamID = id + buf := newRecvBuffer() + fc := &inFlow{ + limit: initialWindowSize, + conn: t.fc, + } + curStream = &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: fc, + } + endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) + curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle, &wg) + case *http2.ContinuationFrame: + curStream = t.operateHeaders(hDec, curStream, frame, false, handle, &wg) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + break + default: + grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + swu, cwu := s.fc.onRead(n) + if swu > 0 { + t.controlBuf.put(&windowUpdate{s.id, swu}) + } + if cwu > 0 { + t.controlBuf.put(&windowUpdate{0, cwu}) + } +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + size := len(f.Data()) + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.mu.Lock() + if s.state != streamDone { + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone + } + } + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + t.closeStream(s) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + // The settings will be applied once the ack is sent. 
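+ // (applySettings runs on the controller goroutine immediately after the
+ // SETTINGS ack is written; see controller.)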
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+ t.controlBuf.put(&ping{true})
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
+}
+
+func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
+ first := true
+ endHeaders := false
+ var err error
+ // Sends the headers in a single batch.
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: b.Next(size),
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ }
+ err = t.framer.writeHeaders(endHeaders, p)
+ first = false
+ } else {
+ err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
+ }
+ if err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ }
+ return nil
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ s.mu.Lock()
+ if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
+ return ErrIllegalHeaderWrite
+ }
+ s.headerOk = true
+ s.mu.Unlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ for k, v := range md {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, false); err != nil {
+ return err
+ }
+ t.writableChan <- 0
+ return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+ s.mu.RLock()
+ if s.state == streamDone {
+ s.mu.RUnlock()
+ return nil
+ }
+ s.mu.RUnlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(
+ hpack.HeaderField{
+ Name: "grpc-status",
+ Value: strconv.Itoa(int(statusCode)),
+ })
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
+ // Attach the trailer metadata.
+ for k, v := range s.trailer {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, true); err != nil {
+ t.Close()
+ return err
+ }
+ t.closeStream(s)
+ t.writableChan <- 0
+ return nil
+}
+
+// Write converts the data into HTTP2 data frames and sends them out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
+ // TODO(zhaoq): Support multi-writers for a single stream.
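+ // If the application has not called WriteHeader, synthesize the mandatory
+ // ":status" and "content-type" response headers before the first data frame.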
+ var writeHeaderFrame bool + s.mu.Lock() + if !s.headerOk { + writeHeaderFrame = true + s.headerOk = true + } + s.mu.Unlock() + if writeHeaderFrame { + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + return err + } + t.hBuf.Reset() + t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + p := http2.HeadersFrameParam{ + StreamID: s.id, + BlockFragment: t.hBuf.Bytes(), + EndHeaders: true, + } + if err := t.framer.writeHeaders(false, p); err != nil { + t.Close() + return ConnectionErrorf("transport: %v", err) + } + t.writableChan <- 0 + } + r := bytes.NewBuffer(data) + for { + if r.Len() == 0 { + return nil + } + size := http2MaxFrameLen + s.sendQuotaPool.add(0) + // Wait until the stream has some quota to send the data. + sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + if err != nil { + return err + } + t.sendQuotaPool.add(0) + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + if err != nil { + if _, ok := err.(StreamError); ok { + t.sendQuotaPool.cancel() + } + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + p := r.Next(size) + ps := len(p) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.add(sq - ps) + } + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + t.framer.adjustNumWriters(1) + // Got some quota. Try to acquire writing privilege on the + // transport. + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if t.framer.adjustNumWriters(-1) == 0 { + // This writer is the last one in this batch and has the + // responsibility to flush the buffered frames. It queues + // a flush request to controlBuf instead of flushing directly + // in order to avoid the race with other writing or flushing. + t.controlBuf.put(&flushIO{}) + } + return err + } + var forceFlush bool + if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { + forceFlush = true + } + if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { + t.Close() + return ConnectionErrorf("transport: %v", err) + } + if t.framer.adjustNumWriters(-1) == 0 { + t.framer.flushWrite() + } + t.writableChan <- 0 + } + +} + +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + defer t.mu.Unlock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + } + + } +} + +// controller running in a separate goroutine takes charge of sending control +// frames (e.g., window update, reset stream, setting, etc.) to the server. +func (t *http2Server) controller() { + for { + select { + case i := <-t.controlBuf.get(): + t.controlBuf.load() + select { + case <-t.writableChan: + switch i := i.(type) { + case *windowUpdate: + t.framer.writeWindowUpdate(true, i.streamID, i.increment) + case *settings: + if i.ack { + t.framer.writeSettingsAck(true) + t.applySettings(i.ss) + } else { + t.framer.writeSettings(true, i.ss...) + } + case *resetStream: + t.framer.writeRSTStream(true, i.streamID, i.code) + case *flushIO: + t.framer.flushWrite() + case *ping: + // TODO(zhaoq): Ack with all-0 data now. will change to some + // meaningful content when this is actually in use. 
+ t.framer.writePing(true, i.ack, [8]byte{}) + default: + grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) + } + t.writableChan <- 0 + continue + case <-t.shutdownChan: + return + } + case <-t.shutdownChan: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() (err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + close(t.shutdownChan) + err = t.conn.Close() + // Notify all active streams. + for _, s := range streams { + s.write(recvMsg{err: ErrConnClosing}) + } + return +} + +// closeStream clears the footprint of a stream when the stream is not needed +// any more. +func (t *http2Server) closeStream(s *Stream) { + t.mu.Lock() + delete(t.activeStreams, s.id) + t.mu.Unlock() + if q := s.fc.restoreConn(); q > 0 { + t.controlBuf.put(&windowUpdate{0, q}) + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.mu.Unlock() + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), the caller needs + // to call cancel on the stream to interrupt the blocking on + // other goroutines. + s.cancel() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go new file mode 100644 index 000000000..e8ed39e10 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package transport + +import ( + "bufio" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/bradfitz/http2" + "github.com/bradfitz/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" +) + +const ( + // The primary user agent + primaryUA = "grpc-go/0.11" + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // http2IOBufSize specifies the buffer size for sending frames. + http2IOBufSize = 32 * 1024 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } +) + +// Records the states during HPACK decoding. Must be reset once the +// decoding of the entire headers are finished. +type decodeState struct { + // statusCode caches the stream status received from the trailer + // the server sent. Client side only. + statusCode codes.Code + statusDesc string + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string +} + +// An hpackDecoder decodes HTTP2 headers which may span multiple frames. +type hpackDecoder struct { + h *hpack.Decoder + state decodeState + err error // The err when decoding +} + +// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. +type headerFrame interface { + Header() http2.FrameHeader + HeaderBlockFragment() []byte + HeadersEnded() bool +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "te": + return true + default: + return false + } +} + +func newHPACKDecoder() *hpackDecoder { + d := &hpackDecoder{} + d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { + switch f.Name { + case "content-type": + // TODO(zhaoq): Tentatively disable the check until a bug is fixed. 
+ /* + if !strings.Contains(f.Value, "application/grpc") { + d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header") + return + } + */ + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.state.statusCode = codes.Code(code) + case "grpc-message": + d.state.statusDesc = f.Value + case "grpc-timeout": + d.state.timeoutSet = true + var err error + d.state.timeout, err = timeoutDecode(f.Value) + if err != nil { + d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) + return + } + case ":path": + d.state.method = f.Value + default: + if !isReservedHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. + f.Value = f.Value[:i] + } + if d.state.mdata == nil { + d.state.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return + } + d.state.mdata[k] = append(d.state.mdata[k], v) + } + } + }) + return d +} + +func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { + d.err = nil + _, err = d.h.Write(frame.HeaderBlockFragment()) + if err != nil { + err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) + } + + if frame.HeadersEnded() { + if closeErr := d.h.Close(); closeErr != nil && err == nil { + err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) + } + endHeaders = true + } + + if err == nil && d.err != nil { + err = d.err + } + return +} + +func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { + d.err = nil + _, err = d.h.Write(frame.HeaderBlockFragment()) + if err != nil { + err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) + } + + if frame.HeadersEnded() { + if closeErr := d.h.Close(); closeErr != nil && err == nil { + err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) + } + endHeaders = true + } + + if err == nil && d.err != nil { + err = d.err + } + return +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. 
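+// timeoutEncode renders t in the grpc-timeout wire format: at most 8 digits
+// followed by a unit letter, using the smallest unit whose value still fits,
+// e.g.
+//
+//	timeoutEncode(time.Millisecond) == "1000000n"
+//	timeoutEncode(time.Second)      == "1000000u"
+//
+// timeoutDecode reverses this: timeoutDecode("1000000u") yields time.Second.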
+func timeoutEncode(t time.Duration) string { + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +type framer struct { + numWriters int32 + reader io.Reader + writer *bufio.Writer + fr *http2.Framer +} + +func newFramer(conn net.Conn) *framer { + f := &framer{ + reader: conn, + writer: bufio.NewWriterSize(conn, http2IOBufSize), + } + f.fr = http2.NewFramer(f.writer, f.reader) + return f +} + +func (f *framer) adjustNumWriters(i int32) int32 { + return atomic.AddInt32(&f.numWriters, i) +} + +// The following writeXXX functions can only be called when the caller gets +// unblocked from writableChan channel (i.e., owns the privilege to write). + +func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { + if err := f.fr.WriteData(streamID, endStream, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { + if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { + if err := f.fr.WriteHeaders(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { + if err := f.fr.WritePing(ack, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { + if err := f.fr.WritePriority(streamID, p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { + if err := f.fr.WritePushPromise(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { + if err := f.fr.WriteRSTStream(streamID, code); err != nil { + return err + } + if 
forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { + if err := f.fr.WriteSettings(settings...); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettingsAck(forceFlush bool) error { + if err := f.fr.WriteSettingsAck(); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { + if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) flushWrite() error { + return f.writer.Flush() +} + +func (f *framer) readFrame() (http2.Frame, error) { + return f.fr.ReadFrame() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util_test.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util_test.go new file mode 100644 index 000000000..b5b18bf43 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util_test.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package transport + +import ( + "fmt" + "testing" + "time" +) + +func TestTimeoutEncode(t *testing.T) { + for _, test := range []struct { + in string + out string + }{ + {"12345678ns", "12345678n"}, + {"123456789ns", "123457u"}, + {"12345678us", "12345678u"}, + {"123456789us", "123457m"}, + {"12345678ms", "12345678m"}, + {"123456789ms", "123457S"}, + {"12345678s", "12345678S"}, + {"123456789s", "2057614M"}, + {"12345678m", "12345678M"}, + {"123456789m", "2057614H"}, + } { + d, err := time.ParseDuration(test.in) + if err != nil { + t.Fatalf("failed to parse duration string %s: %v", test.in, err) + } + out := timeoutEncode(d) + if out != test.out { + t.Fatalf("timeoutEncode(%s) = %s, want %s", test.in, out, test.out) + } + } +} + +func TestTimeoutDecode(t *testing.T) { + for _, test := range []struct { + // input + s string + // output + d time.Duration + err error + }{ + {"1234S", time.Second * 1234, nil}, + {"1234x", 0, fmt.Errorf("transport: timeout unit is not recognized: %q", "1234x")}, + {"1", 0, fmt.Errorf("transport: timeout string is too short: %q", "1")}, + {"", 0, fmt.Errorf("transport: timeout string is too short: %q", "")}, + } { + d, err := timeoutDecode(test.s) + if d != test.d || fmt.Sprint(err) != fmt.Sprint(test.err) { + t.Fatalf("timeoutDecode(%q) = %d, %v, want %d, %v", test.s, int64(d), err, int64(test.d), test.err) + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/ca.pem new file mode 100644 index 000000000..999da8977 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/ca.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw +NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh +MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 +Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 +cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv +xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ +QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ +AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz +fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y +5ukK//dCPAzA11YuX2rnex0JhuTQfcI= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.key new file mode 100644 index 000000000..ed00cd500 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ +bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM +BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB +AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I +ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg +omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb +ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB +9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd +MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v +IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc 
+USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo
+VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a
+RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag==
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.pem
new file mode 100644
index 000000000..8e582e571
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.pem
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET
+MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ
+dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5
+MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV
+BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl
+c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs
+JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO
+RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30
+3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL
+BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6
+b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ
+KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS
+wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e
+aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s=
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go
new file mode 100644
index 000000000..e20a6d992
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go
@@ -0,0 +1,457 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package transport defines and implements a message-oriented communication
+channel to complete various transactions (e.g., an RPC).
+*/
+package transport
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/metadata"
+)
+
+// recvMsg represents the received msg from the transport. All transport
+// protocol specific info has been removed.
+type recvMsg struct {
+	data []byte
+	// nil: received some data
+	// io.EOF: stream is completed. data is nil.
+	// other non-nil error: transport failure. data is nil.
+	err error
+}
+
+func (recvMsg) isItem() bool {
+	return true
+}
+
+// All items put in and taken out of a recvBuffer should be of the same type.
+type item interface {
+	isItem() bool
+}
+
+// recvBuffer is an unbounded channel of items.
+type recvBuffer struct {
+	c       chan item
+	mu      sync.Mutex
+	backlog []item
+}
+
+func newRecvBuffer() *recvBuffer {
+	b := &recvBuffer{
+		c: make(chan item, 1),
+	}
+	return b
+}
+
+func (b *recvBuffer) put(r item) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.backlog = append(b.backlog, r)
+	select {
+	case b.c <- b.backlog[0]:
+		b.backlog = b.backlog[1:]
+	default:
+	}
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+}
+
+// get returns the channel that receives an item in the buffer.
+//
+// Upon receipt of an item, the caller should call load to send another
+// item onto the channel if there is any.
+func (b *recvBuffer) get() <-chan item {
+	return b.c
+}
+
+// recvBufferReader implements the io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+	ctx  context.Context
+	recv *recvBuffer
+	last *bytes.Reader // Stores the remaining data in the previous calls.
+	err  error
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	defer func() { r.err = err }()
+	if r.last != nil && r.last.Len() > 0 {
+		// Read remaining data left in last call.
+		return r.last.Read(p)
+	}
+	select {
+	case <-r.ctx.Done():
+		return 0, ContextErr(r.ctx.Err())
+	case i := <-r.recv.get():
+		r.recv.load()
+		m := i.(*recvMsg)
+		if m.err != nil {
+			return 0, m.err
+		}
+		r.last = bytes.NewReader(m.data)
+		return r.last.Read(p)
+	}
+}
+
+type streamState uint8
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone // EndStream sent
+	streamReadDone  // EndStream received
+	streamDone      // both write and read are done, or an RSTStream frame was sent or received
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id uint32
+	// nil for client side Stream.
+	st ServerTransport
+	// ctx is the associated context of the stream.
+	ctx    context.Context
+	cancel context.CancelFunc
+	// method records the associated RPC method of the stream.
+	method string
+	buf    *recvBuffer
+	dec    io.Reader
+	fc     *inFlow
+	recvQuota uint32
+	// The accumulated inbound quota pending for window update.
+	updateQuota uint32
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+
+	sendQuotaPool *quotaPool
+	// Close headerChan to indicate the end of reception of header metadata.
+	headerChan chan struct{}
+	// header caches the received header metadata.
+	header metadata.MD
+	// The key-value map of trailer metadata.
+	trailer metadata.MD
+
+	mu sync.RWMutex // guard the following
+	// headerOk becomes true when the first header is about to be sent.
+	headerOk bool
+	state    streamState
+	// true iff headerChan is closed. Used to avoid closing headerChan
+	// multiple times.
+	headerDone bool
+	// the status received from the server.
+	statusCode codes.Code
+	statusDesc string
+}
+
+// Header acquires the key-value pairs of header metadata once they are
+// available. It blocks until i) the metadata is ready or ii) there is no
+// header metadata or iii) the stream is cancelled/expired.
+func (s *Stream) Header() (metadata.MD, error) {
+	select {
+	case <-s.ctx.Done():
+		return nil, ContextErr(s.ctx.Err())
+	case <-s.headerChan:
+		return s.header.Copy(), nil
+	}
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+func (s *Stream) Trailer() metadata.MD {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.trailer.Copy()
+}
+
+// ServerTransport returns the underlying ServerTransport for the stream.
+// The client side stream always returns nil.
+func (s *Stream) ServerTransport() ServerTransport {
+	return s.st
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// StatusCode returns the statusCode received from the server.
+func (s *Stream) StatusCode() codes.Code {
+	return s.statusCode
+}
+
+// StatusDesc returns the statusDesc received from the server.
+func (s *Stream) StatusDesc() string {
+	return s.statusDesc
+}
+
+// ErrIllegalTrailerSet indicates that the trailer has already been set or it
+// is too late to do so.
+var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can only be called at most once. Server side only.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.trailer != nil {
+		return ErrIllegalTrailerSet
+	}
+	s.trailer = md.Copy()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(&m)
+}
+
+// Read reads all the data available for this Stream from the transport and
+// passes it into the decoder, which converts it into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	n, err = s.dec.Read(p)
+	if err != nil {
+		return
+	}
+	s.windowHandler(n)
+	return
+}
+
+type key int
+
+// streamKey is the key to save transport.Stream in the context.
+const streamKey = key(0)
+
+// newContextWithStream creates a new context from ctx and attaches stream
+// to it.
+func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
+	return context.WithValue(ctx, streamKey, stream)
+}
+
+// StreamFromContext returns the stream saved in ctx.
+func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
+	s, ok = ctx.Value(streamKey).(*Stream)
+	return
+}
+
+// transportState is the state of a transport.
+type transportState int
+
+const (
+	reachable transportState = iota
+	unreachable
+	closing
+)
+
+// NewServerTransport creates a ServerTransport from conn, or returns a
+// non-nil error if it fails.
+func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
+	return newHTTP2Server(conn, maxStreams, authInfo)
+}
+
+// ConnectOptions covers all relevant options for dialing a server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(string, time.Duration) (net.Conn, error)
+	// AuthOptions stores the credentials required to set up a client connection and/or issue RPCs.
+	AuthOptions []credentials.Credentials
+	// Timeout specifies the timeout for dialing a client connection.
+	Timeout time.Duration
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) {
+	return newHTTP2Client(target, opts)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+	// Last indicates whether this is the last piece of data for this stream.
+	Last bool
+	// Delay is a hint to the transport implementation that the data could be
+	// buffered for a batching write. The implementation is free to ignore it.
+	Delay bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+	Host   string // peer host
+	Method string // the operation to perform on the specified host
+}
+
+// ClientTransport is the common interface for all gRPC client side transport
+// implementations.
+type ClientTransport interface {
+	// Close tears down this transport. Once it returns, the transport
+	// should not be accessed any more. The caller must make sure this
+	// is called only once.
+	Close() error
+
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, data []byte, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in the error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+}
+
+// ServerTransport is the common interface for all gRPC server side transport
+// implementations.
+type ServerTransport interface {
+	// WriteStatus sends the status of a stream to the client.
+	WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
+	// Write sends the data for the given stream.
+	Write(s *Stream, data []byte, opts *Options) error
+	// WriteHeader sends the header metadata for the given stream.
+	WriteHeader(s *Stream, md metadata.MD) error
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(func(*Stream))
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close() error
+}
+
+// StreamErrorf creates a StreamError with the specified error code and description.
+func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
+	return StreamError{
+		Code: c,
+		Desc: fmt.Sprintf(format, a...),
+	}
+}
+
+// ConnectionErrorf creates a ConnectionError with the specified error description.
+func ConnectionErrorf(format string, a ...interface{}) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// ErrConnClosing is a common ConnectionError indicating that the transport is closing.
+var ErrConnClosing = ConnectionError{Desc: "transport is closing"}
+
+// StreamError is an error that only affects one stream within a connection.
+type StreamError struct {
+	Code codes.Code
+	Desc string
+}
+
+func (e StreamError) Error() string {
+	return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
+}
+
+// ContextErr converts the error from the context package into a StreamError.
+func ContextErr(err error) StreamError {
+	switch err {
+	case context.DeadlineExceeded:
+		return StreamErrorf(codes.DeadlineExceeded, "%v", err)
+	case context.Canceled:
+		return StreamErrorf(codes.Canceled, "%v", err)
+	}
+	panic(fmt.Sprintf("Unexpected error from the context package: %v", err))
+}
+
+// wait blocks until it can receive from ctx.Done, closing, or proceed.
+// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
+// If it receives from closing, it returns 0, ErrConnClosing.
+// If it receives from proceed, it returns the received integer, nil.
+func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
+	select {
+	case <-ctx.Done():
+		return 0, ContextErr(ctx.Err())
+	case <-closing:
+		return 0, ErrConnClosing
+	case i := <-proceed:
+		return i, nil
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport_test.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport_test.go
new file mode 100644
index 000000000..ad5128841
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport_test.go
@@ -0,0 +1,594 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"io"
+	"math"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/bradfitz/http2"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+)
+
+type server struct {
+	lis  net.Listener
+	port string
+	// channel to signal that the server is ready to serve.
+	readyChan chan bool
+	mu        sync.Mutex
+	conns     map[ServerTransport]bool
+}
+
+var (
+	expectedRequest       = []byte("ping")
+	expectedResponse      = []byte("pong")
+	expectedRequestLarge  = make([]byte, initialWindowSize*2)
+	expectedResponseLarge = make([]byte, initialWindowSize*2)
+)
+
+type testStreamHandler struct {
+	t ServerTransport
+}
+
+type hType int
+
+const (
+	normal hType = iota
+	suspended
+	misbehaved
+)
+
+func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) {
+	req := expectedRequest
+	resp := expectedResponse
+	if s.Method() == "foo.Large" {
+		req = expectedRequestLarge
+		resp = expectedResponseLarge
+	}
+	p := make([]byte, len(req))
+	_, err := io.ReadFull(s, p)
+	if err != nil || !bytes.Equal(p, req) {
+		if err == ErrConnClosing {
+			return
+		}
+		t.Fatalf("handleStream got error: %v, want <nil>; result: %v, want %v", err, p, req)
+	}
+	// send a response back to the client.
+	h.t.Write(s, resp, &Options{})
+	// send the trailer to end the stream.
+	h.t.WriteStatus(s, codes.OK, "")
+}
+
+// handleStreamSuspension blocks until s.ctx is canceled.
+func (h *testStreamHandler) handleStreamSuspension(s *Stream) {
+	<-s.ctx.Done()
+}
+
+func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) {
+	conn, ok := s.ServerTransport().(*http2Server)
+	if !ok {
+		t.Fatalf("Failed to convert %v to *http2Server", s.ServerTransport())
+	}
+	size := 1
+	if s.Method() == "foo.MaxFrame" {
+		size = http2MaxFrameLen
+	}
+	// Drain the client side stream flow control window.
+	var sent int
+	for sent <= initialWindowSize {
+		<-conn.writableChan
+		if err := conn.framer.writeData(true, s.id, false, make([]byte, size)); err != nil {
+			conn.writableChan <- 0
+			break
+		}
+		conn.writableChan <- 0
+		sent += size
+	}
+}
+
+// start starts the server. Other goroutines should block on s.readyChan for further operations.
+func (s *server) start(t *testing.T, port int, maxStreams uint32, ht hType) {
+	var err error
+	if port == 0 {
+		s.lis, err = net.Listen("tcp", ":0")
+	} else {
+		s.lis, err = net.Listen("tcp", ":"+strconv.Itoa(port))
+	}
+	if err != nil {
+		t.Fatalf("failed to listen: %v", err)
+	}
+	_, p, err := net.SplitHostPort(s.lis.Addr().String())
+	if err != nil {
+		t.Fatalf("failed to parse listener address: %v", err)
+	}
+	s.port = p
+	s.conns = make(map[ServerTransport]bool)
+	if s.readyChan != nil {
+		close(s.readyChan)
+	}
+	for {
+		conn, err := s.lis.Accept()
+		if err != nil {
+			return
+		}
+		transport, err := NewServerTransport("http2", conn, maxStreams, nil)
+		if err != nil {
+			return
+		}
+		s.mu.Lock()
+		if s.conns == nil {
+			s.mu.Unlock()
+			transport.Close()
+			return
+		}
+		s.conns[transport] = true
+		s.mu.Unlock()
+		h := &testStreamHandler{transport}
+		switch ht {
+		case suspended:
+			go transport.HandleStreams(h.handleStreamSuspension)
+		case misbehaved:
+			go transport.HandleStreams(func(s *Stream) {
+				h.handleStreamMisbehave(t, s)
+			})
+		default:
+			go transport.HandleStreams(func(s *Stream) {
+				h.handleStream(t, s)
+			})
+		}
+	}
+}
+
+func (s *server) wait(t *testing.T, timeout time.Duration) {
+	select {
+	case <-s.readyChan:
+	case <-time.After(timeout):
+		t.Fatalf("Timed out after %v waiting for server to be ready", timeout)
+	}
+}
+
+func (s *server) stop() {
+	s.lis.Close()
+	s.mu.Lock()
+	for c := range s.conns {
+		c.Close()
+	}
+	s.conns = nil
+	s.mu.Unlock()
+}
+
+func setUp(t *testing.T, port int, maxStreams uint32, ht hType) (*server, ClientTransport) {
+	server := &server{readyChan: make(chan bool)}
+	go server.start(t, port, maxStreams, ht)
+	server.wait(t, 2*time.Second)
+	addr := "localhost:" + server.port
+	var (
+		ct      ClientTransport
+		connErr error
+	)
+	ct, connErr = NewClientTransport(addr, &ConnectOptions{})
+	if connErr != nil {
+		t.Fatalf("failed to create transport: %v", connErr)
+	}
+	return server, ct
+}
+
+func TestClientSendAndReceive(t *testing.T) {
+	server, ct := setUp(t, 0, math.MaxUint32, normal)
+	callHdr := &CallHdr{
+		Host:   "localhost",
+		Method: "foo.Small",
+	}
+	s1, err1 := ct.NewStream(context.Background(), callHdr)
+	if err1 != nil {
+		t.Fatalf("failed to open stream: %v", err1)
+	}
+	if s1.id != 1 {
+		t.Fatalf("wrong stream id: %d", s1.id)
+	}
+	s2, err2 := ct.NewStream(context.Background(), callHdr)
+	if err2 != nil {
+		t.Fatalf("failed to open stream: %v", err2)
+	}
+	if s2.id != 3 {
+		t.Fatalf("wrong stream id: %d", s2.id)
+	}
+	opts := Options{
+		Last:  true,
+		Delay: false,
+	}
+	if err := ct.Write(s1, expectedRequest, &opts); err != nil {
+		t.Fatalf("failed to send data: %v", err)
+	}
+	p := make([]byte, len(expectedResponse))
+	_, recvErr := io.ReadFull(s1, p)
+	if recvErr != nil || !bytes.Equal(p, expectedResponse) {
+		t.Fatalf("Error: %v, want <nil>; Result: %v, want %v", recvErr, p, expectedResponse)
+	}
+	_, recvErr = io.ReadFull(s1, p)
+	if recvErr != io.EOF {
+		t.Fatalf("Error: %v; want <EOF>", recvErr)
+	}
+	ct.Close()
+	server.stop()
+}
+
+func TestClientErrorNotify(t *testing.T) {
+	server, ct := setUp(t, 0, math.MaxUint32, normal)
+	go server.stop()
+	// ct.reader should detect the error and activate ct.Error().
+	<-ct.Error()
+	ct.Close()
+}
+
+func performOneRPC(ct ClientTransport) {
+	callHdr := &CallHdr{
+		Host:   "localhost",
+		Method: "foo.Small",
+	}
+	s, err := ct.NewStream(context.Background(), callHdr)
+	if err != nil {
+		return
+	}
+	opts := Options{
+		Last:  true,
+		Delay: false,
+	}
+	if err := ct.Write(s, expectedRequest, &opts); err == nil {
+		time.Sleep(5 * time.Millisecond)
+		// The following s.Recv()'s could error out because the
+		// underlying transport is gone.
+		//
+		// Read response
+		p := make([]byte, len(expectedResponse))
+		io.ReadFull(s, p)
+		// Read io.EOF
+		io.ReadFull(s, p)
+	}
+}
+
+func TestClientMix(t *testing.T) {
+	s, ct := setUp(t, 0, math.MaxUint32, normal)
+	go func(s *server) {
+		time.Sleep(5 * time.Second)
+		s.stop()
+	}(s)
+	go func(ct ClientTransport) {
+		<-ct.Error()
+		ct.Close()
+	}(ct)
+	for i := 0; i < 1000; i++ {
+		time.Sleep(10 * time.Millisecond)
+		go performOneRPC(ct)
+	}
+}
+
+func TestLargeMessage(t *testing.T) {
+	server, ct := setUp(t, 0, math.MaxUint32, normal)
+	callHdr := &CallHdr{
+		Host:   "localhost",
+		Method: "foo.Large",
+	}
+	var wg sync.WaitGroup
+	for i := 0; i < 2; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			s, err := ct.NewStream(context.Background(), callHdr)
+			if err != nil {
+				t.Errorf("failed to open stream: %v", err)
+			}
+			if err := ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil {
+				t.Errorf("failed to send data: %v", err)
+			}
+			p := make([]byte, len(expectedResponseLarge))
+			_, recvErr := io.ReadFull(s, p)
+			if recvErr != nil || !bytes.Equal(p, expectedResponseLarge) {
+				t.Errorf("Error: %v, want <nil>; Result len: %d, want len %d", recvErr, len(p), len(expectedResponseLarge))
+			}
+			_, recvErr = io.ReadFull(s, p)
+			if recvErr != io.EOF {
+				t.Errorf("Error: %v; want <EOF>", recvErr)
+			}
+		}()
+	}
+	wg.Wait()
+	ct.Close()
+	server.stop()
+}
+
+func TestLargeMessageSuspension(t *testing.T) {
+	server, ct := setUp(t, 0, math.MaxUint32, suspended)
+	callHdr := &CallHdr{
+		Host:   "localhost",
+		Method: "foo.Large",
+	}
+	// Set a long enough timeout for writing a large message out.
+	ctx, _ := context.WithTimeout(context.Background(), time.Second)
+	s, err := ct.NewStream(ctx, callHdr)
+	if err != nil {
+		t.Fatalf("failed to open stream: %v", err)
+	}
+	// The write should not complete successfully due to flow control.
+	err = ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false})
+	expectedErr := StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)
+	if err == nil || err != expectedErr {
+		t.Fatalf("Write got %v, want %v", err, expectedErr)
+	}
+	ct.Close()
+	server.stop()
+}
+
+func TestMaxStreams(t *testing.T) {
+	server, ct := setUp(t, 0, 1, suspended)
+	callHdr := &CallHdr{
+		Host:   "localhost",
+		Method: "foo.Large",
+	}
+	// Have a pending stream which takes all streams quota.
+	s, err := ct.NewStream(context.Background(), callHdr)
+	if err != nil {
+		t.Fatalf("Failed to open stream: %v", err)
+	}
+	cc, ok := ct.(*http2Client)
+	if !ok {
+		t.Fatalf("Failed to convert %v to *http2Client", ct)
+	}
+	done := make(chan struct{})
+	ch := make(chan int)
+	go func() {
+		for {
+			select {
+			case <-time.After(5 * time.Millisecond):
+				ch <- 0
+			case <-time.After(5 * time.Second):
+				close(done)
+				return
+			}
+		}
+	}()
+	for {
+		select {
+		case <-ch:
+		case <-done:
+			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
+		}
+		cc.mu.Lock()
+		// cc.streamsQuota should be initialized once the first SETTINGS frame is
+		// received from the server.
+ if cc.streamsQuota != nil { + cc.mu.Unlock() + select { + case <-cc.streamsQuota.acquire(): + t.Fatalf("streamsQuota.acquire() becomes readable mistakenly.") + default: + if cc.streamsQuota.quota != 0 { + t.Fatalf("streamsQuota.quota got non-zero quota mistakenly.") + } + } + break + } + cc.mu.Unlock() + } + // Close the pending stream so that the streams quota becomes available for the next new stream. + ct.CloseStream(s, nil) + select { + case i := <-cc.streamsQuota.acquire(): + if i != 1 { + t.Fatalf("streamsQuota.acquire() got %d quota, want 1.", i) + } + cc.streamsQuota.add(i) + default: + t.Fatalf("streamsQuota.acquire() is not readable.") + } + if _, err := ct.NewStream(context.Background(), callHdr); err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + ct.Close() + server.stop() +} + +func TestServerWithMisbehavedClient(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + var sc *http2Server + // Wait until the server transport is setup. + for { + server.mu.Lock() + if len(server.conns) == 0 { + server.mu.Unlock() + time.Sleep(time.Millisecond) + continue + } + for k := range server.conns { + var ok bool + sc, ok = k.(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", k) + } + } + server.mu.Unlock() + break + } + cc, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + // Test server behavior for violation of stream flow control window size restriction. + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + var sent int + // Drain the stream flow control window + <-cc.writableChan + if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil { + t.Fatalf("Failed to write data: %v", err) + } + cc.writableChan <- 0 + sent += http2MaxFrameLen + // Wait until the server creates the corresponding stream and receive some data. + var ss *Stream + for { + time.Sleep(time.Millisecond) + sc.mu.Lock() + if len(sc.activeStreams) == 0 { + sc.mu.Unlock() + continue + } + ss = sc.activeStreams[s.id] + sc.mu.Unlock() + ss.fc.mu.Lock() + if ss.fc.pendingData > 0 { + ss.fc.mu.Unlock() + break + } + ss.fc.mu.Unlock() + } + if ss.fc.pendingData != http2MaxFrameLen || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != http2MaxFrameLen || sc.fc.pendingUpdate != 0 { + t.Fatalf("Server mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, http2MaxFrameLen, 0, http2MaxFrameLen, 0) + } + // Keep sending until the server inbound window is drained for that stream. + for sent <= initialWindowSize { + <-cc.writableChan + if err = cc.framer.writeData(true, s.id, false, make([]byte, 1)); err != nil { + t.Fatalf("Failed to write data: %v", err) + } + cc.writableChan <- 0 + sent++ + } + // Server sent a resetStream for s already. 
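+	// The stream flow control violation is answered with RST_STREAM(FLOW_CONTROL_ERROR),
+	// which the client maps to codes.ResourceExhausted via http2RSTErrConvTab.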
+	code := http2RSTErrConvTab[http2.ErrCodeFlowControl]
+	if _, err := io.ReadFull(s, make([]byte, 1)); err != io.EOF || s.statusCode != code {
+		t.Fatalf("%v got err %v with statusCode %d, want err <EOF> with statusCode %d", s, err, s.statusCode, code)
+	}
+
+	if ss.fc.pendingData != 0 || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != 0 || sc.fc.pendingUpdate != initialWindowSize {
+		t.Fatalf("Server mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, initialWindowSize)
+	}
+	ct.CloseStream(s, nil)
+	// Test server behavior for violation of connection flow control window size restriction.
+	//
+	// Keep creating new streams until the connection window is drained on the server and
+	// the server tears down the connection.
+	for {
+		s, err := ct.NewStream(context.Background(), callHdr)
+		if err != nil {
+			// The server tears down the connection.
+			break
+		}
+		<-cc.writableChan
+		cc.framer.writeData(true, s.id, true, make([]byte, http2MaxFrameLen))
+		cc.writableChan <- 0
+	}
+	ct.Close()
+	server.stop()
+}
+
+func TestClientWithMisbehavedServer(t *testing.T) {
+	server, ct := setUp(t, 0, math.MaxUint32, misbehaved)
+	callHdr := &CallHdr{
+		Host:   "localhost",
+		Method: "foo",
+	}
+	conn, ok := ct.(*http2Client)
+	if !ok {
+		t.Fatalf("Failed to convert %v to *http2Client", ct)
+	}
+	// Test the logic for the violation of stream flow control window size restriction.
+	s, err := ct.NewStream(context.Background(), callHdr)
+	if err != nil {
+		t.Fatalf("Failed to open stream: %v", err)
+	}
+	if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
+		t.Fatalf("Failed to write: %v", err)
+	}
+	// Read without window update.
+	for {
+		p := make([]byte, http2MaxFrameLen)
+		if _, err = s.dec.Read(p); err != nil {
+			break
+		}
+	}
+	if s.fc.pendingData != initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != initialWindowSize || conn.fc.pendingUpdate != 0 {
+		t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, initialWindowSize, 0)
+	}
+	if err != io.EOF || s.statusCode != codes.Internal {
+		t.Fatalf("Got err %v and the status code %d, want <EOF> and the code %d", err, s.statusCode, codes.Internal)
+	}
+	conn.CloseStream(s, err)
+	if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != initialWindowSize {
+		t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize)
+	}
+	// Test the logic for the violation of the connection flow control window size restriction.
+	//
+	// Generate enough streams to drain the connection window.
+	callHdr = &CallHdr{
+		Host:   "localhost",
+		Method: "foo.MaxFrame",
+	}
+	for i := 0; i < int(initialConnWindowSize/initialWindowSize+10); i++ {
+		s, err := ct.NewStream(context.Background(), callHdr)
+		if err != nil {
+			break
+		}
+		if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
+			break
+		}
+	}
+	// http2Client.errChan is closed due to connection flow control window size violation.
+	<-conn.Error()
+	ct.Close()
+	server.stop()
+}
+
+func TestStreamContext(t *testing.T) {
+	expectedStream := Stream{}
+	ctx := newContextWithStream(context.Background(), &expectedStream)
+	s, ok := StreamFromContext(ctx)
+	if !ok || !reflect.DeepEqual(expectedStream, *s) {
+		t.Fatalf("StreamFromContext(%v) = %v, %t, want: %v, true", ctx, *s, ok, expectedStream)
+	}
+}
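For reference, the grpc-timeout wire format exercised by TestTimeoutEncode and TestTimeoutDecode above can be reproduced in a few lines. The following is a minimal, self-contained sketch, not part of the vendored code; decodeTimeout is a hypothetical stand-in for the package's unexported timeoutDecode:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// decodeTimeout re-sketches timeoutDecode: the wire value is an ASCII
// integer followed by a single unit byte.
func decodeTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string is too short: %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	unit, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
	}
	n, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(n) * unit, nil
}

func main() {
	d, err := decodeTimeout("1234S") // the first case in TestTimeoutDecode
	fmt.Println(d, err)              // prints "20m34s <nil>"
}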