Repository: gophergala/ImgurGo Branch: master Commit: fedf337a57e2 Files: 424 Total size: 3.0 MB Directory structure: gitextract_yns78iqd/ ├── .buildpacks ├── .gitignore ├── .travis.yml ├── Dockerfile ├── Godeps/ │ ├── Godeps.json │ └── Readme ├── LICENSE ├── Procfile ├── README.md ├── app.json ├── config/ │ ├── config.go │ └── default.conf.json ├── docker/ │ ├── build_gm.sh │ ├── conf.json │ └── meme.traineddata ├── goclean.sh ├── imageprocessor/ │ ├── compresslosslessly.go │ ├── exifstripper.go │ ├── imageorienter.go │ ├── imageprocessor.go │ ├── imagescaler.go │ ├── ocr.go │ ├── ocr_test.go │ ├── processorcommand/ │ │ ├── gm.go │ │ ├── jpegtran.go │ │ ├── ocrcommands.go │ │ ├── optipng.go │ │ ├── runner.go │ │ └── stripmetadata.go │ └── thumbType/ │ └── thumbType.go ├── imagestore/ │ ├── factory.go │ ├── gcsstore.go │ ├── hash.go │ ├── localstore.go │ ├── memorystore.go │ ├── namepathmapper.go │ ├── s3store.go │ ├── store.go │ └── storeobject.go ├── main.go ├── server/ │ ├── authenticator.go │ ├── authenticator_test.go │ ├── server.go │ ├── server_test.go │ └── stats.go ├── uploadedfile/ │ ├── thumbfile.go │ └── uploadedfile.go └── vendor/ ├── github.com/ │ ├── PagerDuty/ │ │ └── godspeed/ │ │ ├── .gitignore │ │ ├── .travis.yml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── async.go │ │ ├── events.go │ │ ├── godspeed.go │ │ ├── service_checks.go │ │ ├── shared.go │ │ └── stats.go │ ├── bradfitz/ │ │ └── http2/ │ │ ├── .gitignore │ │ ├── AUTHORS │ │ ├── CONTRIBUTORS │ │ ├── Dockerfile │ │ ├── HACKING │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── README │ │ ├── buffer.go │ │ ├── errors.go │ │ ├── flow.go │ │ ├── frame.go │ │ ├── gotrack.go │ │ ├── h2i/ │ │ │ ├── README.md │ │ │ └── h2i.go │ │ ├── headermap.go │ │ ├── hpack/ │ │ │ ├── encode.go │ │ │ ├── hpack.go │ │ │ ├── huffman.go │ │ │ └── tables.go │ │ ├── http2.go │ │ ├── pipe.go │ │ ├── server.go │ │ ├── transport.go │ │ ├── write.go │ │ └── writesched.go │ ├── golang/ │ │ ├── glog/ │ │ │ ├── LICENSE │ │ │ ├── README │ │ │ ├── glog.go │ │ │ └── glog_file.go │ │ └── protobuf/ │ │ ├── LICENSE │ │ └── proto/ │ │ ├── Makefile │ │ ├── clone.go │ │ ├── decode.go │ │ ├── encode.go │ │ ├── equal.go │ │ ├── extensions.go │ │ ├── lib.go │ │ ├── message_set.go │ │ ├── pointer_reflect.go │ │ ├── pointer_unsafe.go │ │ ├── properties.go │ │ ├── proto3_proto/ │ │ │ ├── proto3.pb.go │ │ │ └── proto3.proto │ │ ├── text.go │ │ └── text_parser.go │ ├── gorilla/ │ │ ├── context/ │ │ │ ├── .travis.yml │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── context.go │ │ │ └── doc.go │ │ └── mux/ │ │ ├── .travis.yml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── doc.go │ │ ├── mux.go │ │ ├── regexp.go │ │ └── route.go │ ├── mitchellh/ │ │ └── goamz/ │ │ ├── LICENSE │ │ ├── aws/ │ │ │ ├── attempt.go │ │ │ ├── aws.go │ │ │ └── client.go │ │ └── s3/ │ │ ├── multi.go │ │ ├── s3.go │ │ ├── s3test/ │ │ │ └── server.go │ │ └── sign.go │ ├── trustmaster/ │ │ └── go-aspell/ │ │ ├── README.md │ │ └── aspell.go │ └── vaughan0/ │ └── go-ini/ │ ├── LICENSE │ ├── README.md │ ├── ini.go │ └── test.ini ├── golang.org/ │ └── x/ │ ├── crypto/ │ │ ├── LICENSE │ │ ├── PATENTS │ │ └── ssh/ │ │ └── terminal/ │ │ ├── terminal.go │ │ ├── util.go │ │ ├── util_bsd.go │ │ ├── util_linux.go │ │ └── util_windows.go │ ├── net/ │ │ ├── LICENSE │ │ ├── PATENTS │ │ └── context/ │ │ └── context.go │ └── oauth2/ │ ├── .travis.yml │ ├── AUTHORS │ ├── CONTRIBUTING.md │ ├── CONTRIBUTORS │ ├── LICENSE │ ├── README.md │ ├── client_appengine.go │ ├── clientcredentials/ │ │ └── clientcredentials.go │ ├── facebook/ │ 
│ └── facebook.go │ ├── github/ │ │ └── github.go │ ├── google/ │ │ ├── appengine.go │ │ ├── appengine_hook.go │ │ ├── default.go │ │ ├── google.go │ │ └── sdk.go │ ├── internal/ │ │ ├── oauth2.go │ │ ├── token.go │ │ └── transport.go │ ├── jws/ │ │ └── jws.go │ ├── jwt/ │ │ └── jwt.go │ ├── linkedin/ │ │ └── linkedin.go │ ├── oauth2.go │ ├── odnoklassniki/ │ │ └── odnoklassniki.go │ ├── paypal/ │ │ └── paypal.go │ ├── token.go │ ├── transport.go │ └── vk/ │ └── vk.go └── google.golang.org/ ├── api/ │ ├── LICENSE │ ├── bigquery/ │ │ └── v2/ │ │ ├── bigquery-api.json │ │ └── bigquery-gen.go │ ├── container/ │ │ └── v1beta1/ │ │ ├── container-api.json │ │ └── container-gen.go │ ├── googleapi/ │ │ ├── googleapi.go │ │ ├── internal/ │ │ │ └── uritemplates/ │ │ │ ├── LICENSE │ │ │ ├── uritemplates.go │ │ │ └── utils.go │ │ ├── transport/ │ │ │ └── apikey.go │ │ └── types.go │ ├── pubsub/ │ │ └── v1beta2/ │ │ ├── pubsub-api.json │ │ └── pubsub-gen.go │ └── storage/ │ └── v1/ │ ├── storage-api.json │ └── storage-gen.go ├── appengine/ │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── aetest/ │ │ ├── doc.go │ │ ├── instance.go │ │ ├── instance_classic.go │ │ ├── instance_vm.go │ │ └── user.go │ ├── appengine.go │ ├── appengine_vm.go │ ├── blobstore/ │ │ ├── blobstore.go │ │ └── read.go │ ├── capability/ │ │ └── capability.go │ ├── channel/ │ │ └── channel.go │ ├── cloudsql/ │ │ ├── cloudsql.go │ │ ├── cloudsql_classic.go │ │ └── cloudsql_vm.go │ ├── cmd/ │ │ ├── aebundler/ │ │ │ └── aebundler.go │ │ └── aedeploy/ │ │ └── aedeploy.go │ ├── datastore/ │ │ ├── datastore.go │ │ ├── doc.go │ │ ├── key.go │ │ ├── load.go │ │ ├── metadata.go │ │ ├── prop.go │ │ ├── query.go │ │ ├── save.go │ │ └── transaction.go │ ├── delay/ │ │ └── delay.go │ ├── demos/ │ │ ├── guestbook/ │ │ │ ├── app.yaml │ │ │ ├── guestbook.go │ │ │ ├── index.yaml │ │ │ └── templates/ │ │ │ └── guestbook.html │ │ └── helloworld/ │ │ ├── app.yaml │ │ └── helloworld.go │ ├── errors.go │ ├── file/ │ │ └── file.go │ ├── identity.go │ ├── image/ │ │ └── image.go │ ├── internal/ │ │ ├── aetesting/ │ │ │ └── fake.go │ │ ├── api.go │ │ ├── api_classic.go │ │ ├── api_common.go │ │ ├── app_id.go │ │ ├── app_identity/ │ │ │ ├── app_identity_service.pb.go │ │ │ └── app_identity_service.proto │ │ ├── base/ │ │ │ ├── api_base.pb.go │ │ │ └── api_base.proto │ │ ├── blobstore/ │ │ │ ├── blobstore_service.pb.go │ │ │ └── blobstore_service.proto │ │ ├── capability/ │ │ │ ├── capability_service.pb.go │ │ │ └── capability_service.proto │ │ ├── channel/ │ │ │ ├── channel_service.pb.go │ │ │ └── channel_service.proto │ │ ├── datastore/ │ │ │ ├── datastore_v3.pb.go │ │ │ └── datastore_v3.proto │ │ ├── identity.go │ │ ├── identity_classic.go │ │ ├── identity_vm.go │ │ ├── image/ │ │ │ ├── images_service.pb.go │ │ │ └── images_service.proto │ │ ├── internal.go │ │ ├── log/ │ │ │ ├── log_service.pb.go │ │ │ └── log_service.proto │ │ ├── mail/ │ │ │ ├── mail_service.pb.go │ │ │ └── mail_service.proto │ │ ├── memcache/ │ │ │ ├── memcache_service.pb.go │ │ │ └── memcache_service.proto │ │ ├── metadata.go │ │ ├── modules/ │ │ │ ├── modules_service.pb.go │ │ │ └── modules_service.proto │ │ ├── net.go │ │ ├── regen.sh │ │ ├── remote_api/ │ │ │ ├── remote_api.pb.go │ │ │ └── remote_api.proto │ │ ├── search/ │ │ │ ├── search.pb.go │ │ │ └── search.proto │ │ ├── socket/ │ │ │ ├── socket_service.pb.go │ │ │ └── socket_service.proto │ │ ├── system/ │ │ │ ├── system_service.pb.go │ │ │ └── system_service.proto │ │ ├── taskqueue/ │ │ │ ├── taskqueue_service.pb.go │ │ │ 
└── taskqueue_service.proto │ │ ├── transaction.go │ │ ├── urlfetch/ │ │ │ ├── urlfetch_service.pb.go │ │ │ └── urlfetch_service.proto │ │ ├── user/ │ │ │ ├── user_service.pb.go │ │ │ └── user_service.proto │ │ └── xmpp/ │ │ ├── xmpp_service.pb.go │ │ └── xmpp_service.proto │ ├── log/ │ │ ├── api.go │ │ └── log.go │ ├── mail/ │ │ └── mail.go │ ├── memcache/ │ │ └── memcache.go │ ├── module/ │ │ └── module.go │ ├── namespace.go │ ├── remote_api/ │ │ ├── client.go │ │ └── remote_api.go │ ├── runtime/ │ │ └── runtime.go │ ├── search/ │ │ ├── doc.go │ │ ├── field.go │ │ ├── search.go │ │ └── struct.go │ ├── socket/ │ │ ├── doc.go │ │ ├── socket_classic.go │ │ └── socket_vm.go │ ├── taskqueue/ │ │ └── taskqueue.go │ ├── timeout.go │ ├── urlfetch/ │ │ └── urlfetch.go │ ├── user/ │ │ ├── oauth.go │ │ ├── user.go │ │ ├── user_classic.go │ │ └── user_vm.go │ └── xmpp/ │ └── xmpp.go ├── cloud/ │ ├── .travis.yml │ ├── AUTHORS │ ├── CONTRIBUTING.md │ ├── CONTRIBUTORS │ ├── LICENSE │ ├── README.md │ ├── bigquery/ │ │ ├── bigquery.go │ │ ├── copy_op.go │ │ ├── doc.go │ │ ├── error.go │ │ ├── extract_op.go │ │ ├── gcs.go │ │ ├── iterator.go │ │ ├── job.go │ │ ├── load_op.go │ │ ├── query.go │ │ ├── query_op.go │ │ ├── read_op.go │ │ ├── schema.go │ │ ├── service.go │ │ ├── table.go │ │ └── value.go │ ├── bigtable/ │ │ ├── admin.go │ │ ├── bigtable.go │ │ ├── bttest/ │ │ │ └── inmem.go │ │ ├── cmd/ │ │ │ └── cbt/ │ │ │ ├── cbt.go │ │ │ └── cbtdoc.go │ │ ├── doc.go │ │ ├── filter.go │ │ ├── internal/ │ │ │ ├── cluster_data_proto/ │ │ │ │ ├── bigtable_cluster_data.pb.go │ │ │ │ └── bigtable_cluster_data.proto │ │ │ ├── cluster_service_proto/ │ │ │ │ ├── bigtable_cluster_service.pb.go │ │ │ │ ├── bigtable_cluster_service.proto │ │ │ │ ├── bigtable_cluster_service_messages.pb.go │ │ │ │ └── bigtable_cluster_service_messages.proto │ │ │ ├── data_proto/ │ │ │ │ ├── bigtable_data.pb.go │ │ │ │ └── bigtable_data.proto │ │ │ ├── empty/ │ │ │ │ ├── empty.pb.go │ │ │ │ └── empty.proto │ │ │ ├── regen.sh │ │ │ ├── service_proto/ │ │ │ │ ├── bigtable_service.pb.go │ │ │ │ ├── bigtable_service.proto │ │ │ │ ├── bigtable_service_messages.pb.go │ │ │ │ └── bigtable_service_messages.proto │ │ │ ├── table_data_proto/ │ │ │ │ ├── bigtable_table_data.pb.go │ │ │ │ └── bigtable_table_data.proto │ │ │ └── table_service_proto/ │ │ │ ├── bigtable_table_service.pb.go │ │ │ ├── bigtable_table_service.proto │ │ │ ├── bigtable_table_service_messages.pb.go │ │ │ └── bigtable_table_service_messages.proto │ │ └── sample/ │ │ └── search.go │ ├── cloud.go │ ├── compute/ │ │ └── metadata/ │ │ └── metadata.go │ ├── container/ │ │ └── container.go │ ├── datastore/ │ │ ├── datastore.go │ │ ├── errors.go │ │ ├── key.go │ │ ├── load.go │ │ ├── prop.go │ │ ├── query.go │ │ ├── save.go │ │ ├── time.go │ │ └── transaction.go │ ├── examples/ │ │ ├── bigquery/ │ │ │ ├── concat_table/ │ │ │ │ └── main.go │ │ │ ├── load/ │ │ │ │ └── main.go │ │ │ ├── query/ │ │ │ │ └── main.go │ │ │ └── read/ │ │ │ └── main.go │ │ ├── pubsub/ │ │ │ └── cmdline/ │ │ │ └── main.go │ │ └── storage/ │ │ ├── appengine/ │ │ │ ├── app.go │ │ │ └── app.yaml │ │ └── appenginevm/ │ │ ├── app.go │ │ └── app.yaml │ ├── internal/ │ │ ├── cloud.go │ │ ├── datastore/ │ │ │ ├── datastore_v1.pb.go │ │ │ └── datastore_v1.proto │ │ └── testutil/ │ │ └── context.go │ ├── key.json.enc │ ├── option.go │ ├── pubsub/ │ │ └── pubsub.go │ └── storage/ │ ├── acl.go │ ├── storage.go │ └── types.go └── grpc/ ├── .travis.yml ├── CONTRIBUTING.md ├── LICENSE ├── PATENTS ├── README.md ├── benchmark/ 
│ ├── benchmark.go │ ├── client/ │ │ └── main.go │ ├── grpc_testing/ │ │ ├── test.pb.go │ │ └── test.proto │ ├── server/ │ │ └── main.go │ └── stats/ │ ├── counter.go │ ├── histogram.go │ ├── stats.go │ ├── timeseries.go │ ├── tracker.go │ └── util.go ├── call.go ├── clientconn.go ├── codegen.sh ├── codes/ │ ├── code_string.go │ └── codes.go ├── credentials/ │ └── credentials.go ├── doc.go ├── examples/ │ └── route_guide/ │ ├── README.md │ ├── client/ │ │ └── client.go │ ├── proto/ │ │ ├── route_guide.pb.go │ │ └── route_guide.proto │ └── server/ │ └── server.go ├── grpc-auth-support.md ├── grpclog/ │ └── logger.go ├── interop/ │ ├── client/ │ │ └── client.go │ ├── grpc_testing/ │ │ ├── test.pb.go │ │ └── test.proto │ └── server/ │ └── server.go ├── metadata/ │ └── metadata.go ├── rpc_util.go ├── server.go ├── stream.go ├── test/ │ ├── codec_perf/ │ │ ├── perf.pb.go │ │ └── perf.proto │ └── grpc_testing/ │ ├── test.pb.go │ └── test.proto └── transport/ ├── control.go ├── http2_client.go ├── http2_server.go ├── http_util.go └── transport.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .buildpacks ================================================ https://github.com/mcollina/heroku-buildpack-graphicsmagick https://github.com/kr/heroku-buildpack-go.git ================================================ FILE: .gitignore ================================================ # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof ImgurGo tags *.sw[o-p] profile.cov ================================================ FILE: .travis.yml ================================================ sudo: required services: - docker before_install: - docker build -t imgur/mandible . script: - docker run -e "COVERALLS_TOKEN=$COVERALLS_TOKEN" -e "TRAVIS_JOB_ID=$TRAVIS_JOB_ID" imgur/mandible /bin/sh -c "cd /go/src/github.com/Imgur/mandible && ./goclean.sh" ================================================ FILE: Dockerfile ================================================ FROM golang:1.8-stretch RUN apt-get update && apt-get install -yqq aspell aspell-en libaspell-dev tesseract-ocr tesseract-ocr-eng libc6 optipng exiftool libjpeg-progs webp ADD docker/build_gm.sh /tmp/build_gm.sh RUN bash /tmp/build_gm.sh ADD docker/meme.traineddata /usr/share/tesseract-ocr/tessdata/meme.traineddata RUN mkdir -p /etc/mandible /tmp/imagestore ENV MANDIBLE_CONF /etc/mandible/conf.json ADD . /go/src/github.com/Imgur/mandible WORKDIR /go/src/github.com/Imgur/mandible RUN go get github.com/mattn/goveralls RUN go get github.com/tools/godep RUN godep restore RUN godep go install -v . CMD ["mandible"] ================================================ FILE: Godeps/Godeps.json ================================================ { "ImportPath": "github.com/Imgur/mandible", "GoVersion": "go1.5", "Packages": [ "./..." 
], "Deps": [ { "ImportPath": "github.com/bradfitz/http2", "Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f" }, { "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "34a5f244f1c01cdfee8e60324258cfbb97a42aec" }, { "ImportPath": "github.com/gorilla/context", "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd" }, { "ImportPath": "github.com/gorilla/mux", "Rev": "47e8f450ef38c857cdd922ec08862ca9d65a1c6d" }, { "ImportPath": "github.com/mitchellh/goamz/aws", "Rev": "2441a8d0fab90553ec345cfdf3db24bb61ea61c3" }, { "ImportPath": "github.com/mitchellh/goamz/s3", "Rev": "2441a8d0fab90553ec345cfdf3db24bb61ea61c3" }, { "ImportPath": "github.com/trustmaster/go-aspell", "Rev": "b1cc0c2c49f83195f1708a1e6d23967d94817296" }, { "ImportPath": "github.com/vaughan0/go-ini", "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1" }, { "ImportPath": "golang.org/x/crypto/ssh/terminal", "Rev": "3760e016850398b85094c4c99e955b8c3dea5711" }, { "ImportPath": "golang.org/x/net/context", "Rev": "84afb0af0050ae286aa9ced0c29383c2a866a925" }, { "ImportPath": "golang.org/x/oauth2", "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" }, { "ImportPath": "google.golang.org/api/bigquery/v2", "Rev": "0610a35668fd6881bec389e74208f0df92010e96" }, { "ImportPath": "google.golang.org/api/container/v1beta1", "Rev": "0610a35668fd6881bec389e74208f0df92010e96" }, { "ImportPath": "google.golang.org/api/googleapi", "Rev": "0610a35668fd6881bec389e74208f0df92010e96" }, { "ImportPath": "google.golang.org/api/pubsub/v1beta2", "Rev": "0610a35668fd6881bec389e74208f0df92010e96" }, { "ImportPath": "google.golang.org/api/storage/v1", "Rev": "0610a35668fd6881bec389e74208f0df92010e96" }, { "ImportPath": "google.golang.org/appengine", "Rev": "6bde959377a90acb53366051d7d587bfd7171354" }, { "ImportPath": "google.golang.org/cloud", "Rev": "0b21ed5434dc279f2b8ea3c02dc69135600bbb8b" }, { "ImportPath": "google.golang.org/grpc", "Rev": "d6f8134fd2e79a0a2a40f284d5552065fb6a8e3c" } ] } ================================================ FILE: Godeps/Readme ================================================ This directory tree is generated automatically by godep. Please do not edit. See https://github.com/tools/godep for more information. ================================================ FILE: LICENSE ================================================ The MIT License (MIT) Copyright (c) 2015 Imgur, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: Procfile ================================================ web: ImgurGo ================================================ FILE: README.md ================================================ # mandible ![TravisStatus](https://travis-ci.org/Imgur/mandible.svg) [![Coverage Status](https://coveralls.io/repos/Imgur/mandible/badge.svg)](https://coveralls.io/r/Imgur/mandible) A ready-to-deploy image uploader that you can run on AWS EC2 or Heroku. It accepts an image via a REST interface and returns information about the file. It also supports processing steps such as compression and thumbnail generation. [![Deploy](https://www.herokucdn.com/deploy/button.png)](https://heroku.com/deploy) ## Features: Supported file types - JPG - PNG - GIF Pluggable storage layers - S3 - Local Pluggable authentication scheme - Time-grant HMAC Processing Steps: - Compression - Thumbnail generation ## Installation ### Docker Pull down the mandible config file and edit it: ``` wget https://raw.githubusercontent.com/Imgur/mandible/master/config/default.conf.json -O ~/mandible/conf.json ``` ``` vim ~/mandible/conf.json ``` To start mandible (port settings could change based on your conf.json): ``` docker run --name mandible -v ~/mandible:/etc/mandible -d -p 80:8080 imgur/mandible ``` To stop mandible: ``` docker stop mandible ``` To run it again: ``` docker start mandible ``` ### (Optional) Authentication - Set the following environment variable - AUTHENTICATION_HMAC_KEY ### S3 Storage Layer Add the following to the `Stores` array in your conf.json file: ``` { "Type" : "s3", "BucketName" : "", "AWSKey": "", "AWSSecret": "", "StoreRoot" : "", "Region" : "us-east-1", "NamePathRegex" : "", "NamePathMap" : "${ImageSize}/${ImageName}" } ``` ## REST API: Interfacing with mandible is extremely simple: ### Upload an image file: `POST /file` with the following multipart/form-data - ```image``` - file --- ### Upload an image from a URL: `POST /url` with the following multipart/form-data - ```image``` - string --- ### Upload an image from base64 data: `POST /base64` with the following multipart/form-data - ```image``` - image encoded as base64 data --- ### Thumbnail generation during upload: **To generate thumbnails with an upload request, pass the following JSON as form-data, keyed under `thumbs`** ```javascript { "name1": { "width": x, "height": y, "shape": ("square" | "thumb" | "circle") }, "name2": { "width": x2, "height": y2, "shape": ("square" | "thumb" | "circle") }, ... } ``` Note: Square thumbnails don't preserve aspect ratio, whereas the 'thumb' type does. --- ### On-the-fly thumbnail generation: **This will return `content-type: image/...` and serve up a thumbnail.** `GET /thumbnail` with the following GET parameters: - ```uid``` - Unique ID of the image - ```thumbs``` - JSON of the following format: ```javascript { "name for the thumbnail": { "shape": ("square" | "thumb" | "circle" | "custom"), // required "width": int, "height": int, "max_width": int, "max_height": int, "crop_gravity": string, // e.g. "nw" for north-west of the image "crop_height": int, "crop_width": int, "quality": int, "crop_ratio": string, // e.g.
"2:1" "format": string, // one of: jpg, png, gif, webm, "nostore": bool, // if true, the resulting thumbnail won't be added to the backing storage } } ``` --- ### OCR endpoint **Runs OCR on the given image and returns text** `GET /ocr` with the following get parameters: - ```uid``` - Unique ID of the image returns: ```Javascript { "hash": string, //uid of the image "ocrtext": string // text returned from OCR } ``` ## Example usage (assuming localhost) ### URL Upload with thumbnails: ``` curl -i http://127.0.0.1:8080/url \ -d 'image=http://i.imgur.com/s9zxmYe.jpg' \ -d 'thumbs={"small": {"width": 20, "height": 20, "shape": "square"}, "profile": {"width": 50, "height": 50, "shape": "circle"}}' ``` ### Response: ```javascript { "data": { "width": 380, "height": 430, "link": "https://s3.amazonaws.com/gophergala/original/CUqU4If", "mime": "image/jpeg", "name": "", "size": 82199, "thumbs": { "profile":"https://s3.amazonaws.com/gophergala/t/CUqU4If/profile", "small": "https://s3.amazonaws.com/gophergala/t/CUqU4If/small" } }, "status": 200, "success": true } ``` ### File Upload with thumbnails: ``` curl -i http://127.0.0.1:8080/file \ -F 'image=@/tmp/cat.gif' \ -F 'thumbs={"small": {"width": 20, "height": 20, "shape": "square"}}' ``` ### Response: ```javascript { "data": { "width": 354, "height": 200, "link": "https://s3.amazonaws.com/gophergala/original/L4ASjMX", "mime": "image/gif", "name": "cat.gif", "size": 3511100, "thumbs": { "small":"https://s3.amazonaws.com/gophergala/t/L4ASjMX/small" } }, "status": 200, "success": true } ``` ### Authenticated upload Uses HTTP headers `Authentication` and `X-Authentication-HMAC`. Generate HMACs by base64-encoding a JSON blob like below. [Example MAC generator](http://play.golang.org/p/3otGr8LBZt). Supplying the client with the Authentication blob and MAC is out of scope for this project. In the future we will support symmetric and asymmetric encryption of the authentication blobs. 
#### Request to my own account with proper authorization: ``` curl -i http://127.0.0.1:8080/user/1/url \ -d 'image=http://i.imgur.com/s9zxmYe.jpg' \ -H 'Authorization: {"user_id":1,"grant_time":"2010-06-01T00:00:00Z","grant_duration_sec":31536000}' \ -H 'X-Authorization-HMAC: tCtGb04n4nvd/94+Xd6vAx9+pJw51ZmX1vH7E+BlTtc=' ``` #### Response: ```javascript {"data":{"link":"/tmp/original/J/a/Jafq9IH","mime":"image/jpeg","name":"s9zxmYe.jpg","hash":"Jafq9IH","size":81881,"width":380,"height":430,"ocrtext":"change\np.roject .\n\n \n \n\n forg@ot to git p.ull before\n- .-+#~+):,-r,ad)q..,i,ng so/ /","thumbs":{},"user_id":"\u0001"},"status":200,"success":true} ``` #### Request to other user's account: ``` curl -i http://127.0.0.1:8080/user/2/url \ -d 'image=http://i.imgur.com/s9zxmYe.jpg' \ -H 'Authorization: {"user_id":1,"grant_time":"2010-06-01T00:00:00Z","grant_duration_sec":31536000}' \ -H 'X-Authorization-HMAC: tCtGb04n4nvd/94+Xd6vAx9+pJw51ZmX1vH7E+BlTtc=' ``` #### Response: ``` HTTP/1.1 401 Unauthorized Date: Mon, 08 Jun 2015 21:04:41 GMT Content-Length: 0 Content-Type: text/plain; charset=utf-8 ``` #### HMAC prevents account forgery ``` curl -i http://127.0.0.1:8080/user/1/url \ -d 'image=http://i.imgur.com/s9zxmYe.jpg' \ -H 'Authorization: {"user_id":1,"grant_time":"2010-06-01T00:00:00Z","grant_duration_sec":31536000}' \ -H 'X-Authorization-HMAC: foobar' ``` #### Response: ```javascript HTTP/1.1 401 Unauthorized Date: Mon, 08 Jun 2015 21:04:41 GMT Content-Length: 0 Content-Type: text/plain; charset=utf-8 ``` ## Contributing The easiest way to develop on this project is to use the built-in docker image. We are using the Go 1.5 vendor experiment, which means if you import a package you must vendor the source code into this repository using Godep. ================================================ FILE: app.json ================================================ { "name": "ImgurGo", "description": "An easy Heroku image uploading service", "repository": "https://github.com/gophergala/ImgurGo", "env": { "BUILDPACK_URL": "https://github.com/ddollar/heroku-buildpack-multi", "IMGUR_GO_CONF": "config/default.conf.json", "S3_BUCKET": { "description": "AWS S3 Bucket", "required": false }, "AWS_ACCESS_KEY_ID": { "description": "AWS Acess Key ID", "required": false }, "AWS_SECRET_ACCESS_KEY": { "description": "AWS Acess Key Secret", "required": false } } } ================================================ FILE: config/config.go ================================================ package config import ( "encoding/json" "fmt" "os" ) type Configuration struct { MaxFileSize int64 HashLength int UserAgent string Stores []map[string]string Port int DatadogEnabled bool DatadogHostname string } func NewConfiguration(path string) *Configuration { file, err := os.Open(path) if err != nil { fmt.Printf("Error opening config file!") os.Exit(-1) } decoder := json.NewDecoder(file) configuration := &Configuration{} err = decoder.Decode(configuration) if err != nil { fmt.Println("Error loading config file: ", err) } return configuration } ================================================ FILE: config/default.conf.json ================================================ { "Port": 8080, "MaxFileSize": 20971520, "HashLength": 7, "UserAgent": "ImgurGo (https://github.com/gophergala/ImgurGo)", "Stores" : [ { "Type" : "s3", "BucketName" : "", "AWSKey": "", "AWSSecret": "", "StoreRoot" : "", "Region" : "us-east-1", "NamePathRegex" : "", "NamePathMap" : "${ImageSize}/${ImageName}" }, { "Type" : "gcs", "BucketName" : "", "StoreRoot" : "", 
"AppID" : "", "KeyFile" : "appid.json", "NamePathRegex" : "", "NamePathMap" : "${ImageSize}/${ImageName}" }, { "Type" : "local", "StoreRoot": "/Users/jarvis/imagestore", "NamePathRegex" : "^([a-zA-Z0-9])([a-zA-Z0-9]).*", "NamePathMap" : "${ImageSize}/${1}/${2}/${ImageName}" } ], "DatadogEnabled": false, "DatadogHostname": "127.0.0.1" } ================================================ FILE: docker/build_gm.sh ================================================ #!/bin/bash apt-get install -y libjpeg-dev liblcms2-dev libwmf-dev libx11-dev libsm-dev libice-dev libxext-dev x11proto-core-dev libxml2-dev libfreetype6-dev libexif-dev libbz2-dev libtiff-dev libjbig-dev zlib1g-dev libpng-dev libwebp-dev ghostscript gsfonts autotools-dev transfig sharutils libltdl-dev mercurial cmake wget "http://www.ece.uvic.ca/~frodo/jasper/software/jasper-2.0.12.tar.gz" -O jasper.tar.gz mkdir jasper && tar -xvzf jasper.tar.gz -C jasper --strip-components 1 && cd jasper mkdir BUILD && cd BUILD && cmake -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_SKIP_INSTALL_RPATH=YES \ -DCMAKE_INSTALL_DOCDIR=/usr/share/doc/jasper-2.0.10 \ .. && make make install cd ../.. && rm -rf jasper jasper.tar.gz hg clone http://hg.code.sf.net/p/graphicsmagick/code GM cd GM hg update -r tip CC="gcc" CFLAGS="-fopenmp -Wall -g -fno-strict-aliasing -O3 -Wall -pthread" CPPFLAGS="-I/usr/include/X11 -I/usr/include/freetype2 -I/usr/include/libxml2" CXX="g++" CXXFLAGS="-Wall -g -fno-strict-aliasing -O3 -pthread" LDFLAGS="-L/usr/lib/X11 -L/usr/lib/x86_64-linux-gnu" LIBS="-ljbig -lwebp -llcms2 -ltiff -lfreetype -ljpeg -lpng16 -lwmflite -lXext -lSM -lICE -lX11 -llzma -lbz2 -lxml2 -lz -lm -lgomp -lpthread" ./configure '--build' 'x86_64-linux-gnu' '--enable-shared' '--enable-static' '--enable-libtool-verbose' '--prefix=/usr' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' '--docdir=${prefix}/share/doc/graphicsmagick' '--with-gs-font-dir=/usr/share/fonts/type1/gsfonts' '--with-x' '--x-includes=/usr/include/X11' '--x-libraries=/usr/lib/X11' '--with-included-ltdl' '--with-modules' '--enable-openmp-slow' '--without-dps' '--without-frozenpaths' '--with-webp' '--with-perl' '--with-perl-options=INSTALLDIRS=vendor' '--enable-quantum-library-names' '--with-quantum-depth=16' 'build_alias=x86_64-linux-gnu' 'CFLAGS=-Wall -g -fno-strict-aliasing -O3' 'LDFLAGS=' 'CXXFLAGS=-Wall -g -fno-strict-aliasing -O3' make make install cd .. && rm -rf GM ================================================ FILE: docker/conf.json ================================================ { "Port": 8080, "MaxFileSize": 20971520, "HashLength": 7, "UserAgent": "Mandible (https://github.com/Imgur/Mandible)", "Stores" : [ { "Type" : "local", "StoreRoot": "/tmp/imagestore", "NamePathRegex" : "^([a-zA-Z0-9])([a-zA-Z0-9]).*", "NamePathMap" : "${ImageSize}/${1}/${2}/${ImageName}" } ] } ================================================ FILE: goclean.sh ================================================ #!/bin/bash # The script does automatic checking on a Go package and its sub-packages, including: # 1. gofmt (http://golang.org/cmd/gofmt/) # 2. goimports (https://github.com/bradfitz/goimports) # 3. golint (https://github.com/golang/lint) # 4. go vet (http://golang.org/cmd/vet) # 5. race detector (http://blog.golang.org/race-detector) # 6. test coverage (http://blog.golang.org/cover) export GO15VENDOREXPERIMENT=1 set -e PROJECTS="./uploadedfile ./server ./imageprocessor ./imagestore ./config ." # Automatic checks test -z "$(gofmt -l -w . 
| tee /dev/stderr)" # test -z "$(goimports -l -w . | tee /dev/stderr)" # test -z "$(golint . | tee /dev/stderr)" godep go vet $PROJECTS godep go test -race $PROJECTS # Run test coverage on each subdirectory and merge the coverage profiles. echo "mode: count" > profile.cov # Standard go tooling behavior is to ignore dirs with leading underscores for dir in $PROJECTS do if ls $dir/*.go &> /dev/null; then godep go test -covermode=count -coverprofile=$dir/profile.tmp $dir if [ -f $dir/profile.tmp ] then cat $dir/profile.tmp | tail -n +2 >> profile.cov rm $dir/profile.tmp fi fi done godep go tool cover -func profile.cov # This is breaking travis-ci. Disabling it for now. # [ ${COVERALLS_TOKEN} ] && goveralls -coverprofile=profile.cov -service travis-ci -repotoken $COVERALLS_TOKEN ================================================ FILE: imageprocessor/compresslosslessly.go ================================================ package imageprocessor import ( "errors" "github.com/Imgur/mandible/imageprocessor/processorcommand" "github.com/Imgur/mandible/uploadedfile" ) type CompressLosslessly struct{} func (this *CompressLosslessly) Process(image *uploadedfile.UploadedFile) error { if image.IsJpeg() { return this.compressJpeg(image) } if image.IsPng() { return this.compressPng(image) } if image.IsGif() { return nil } return errors.New("Unsupported filetype") } func (this *CompressLosslessly) String() string { return "Lossless compressor" } func (this *CompressLosslessly) compressPng(image *uploadedfile.UploadedFile) error { filename, err := processorcommand.Optipng(image.GetPath()) if err != nil { return err } image.SetPath(filename) return nil } func (this *CompressLosslessly) compressJpeg(image *uploadedfile.UploadedFile) error { filename, err := processorcommand.Jpegtran(image.GetPath()) if err != nil { return err } image.SetPath(filename) return nil } ================================================ FILE: imageprocessor/exifstripper.go ================================================ package imageprocessor import ( "github.com/Imgur/mandible/imageprocessor/processorcommand" "github.com/Imgur/mandible/uploadedfile" ) type ExifStripper struct{} func (this *ExifStripper) Process(image *uploadedfile.UploadedFile) error { if !image.IsJpeg() { return nil } err := processorcommand.StripMetadata(image.GetPath()) if err != nil { return err } return nil } func (this *ExifStripper) String() string { return "EXIF stripper" } ================================================ FILE: imageprocessor/imageorienter.go ================================================ package imageprocessor import ( "github.com/Imgur/mandible/imageprocessor/processorcommand" "github.com/Imgur/mandible/uploadedfile" ) type ImageOrienter struct{} func (this *ImageOrienter) Process(image *uploadedfile.UploadedFile) error { filename, err := processorcommand.FixOrientation(image.GetPath()) if err != nil { return err } image.SetPath(filename) return nil } func (this *ImageOrienter) String() string { return "Image orienter" } ================================================ FILE: imageprocessor/imageprocessor.go ================================================ package imageprocessor import ( "fmt" "strings" "github.com/Imgur/mandible/config" "github.com/Imgur/mandible/uploadedfile" ) type ProcessType interface { Process(image *uploadedfile.UploadedFile) error String() string } type multiProcessType []ProcessType func (this multiProcessType) Process(image *uploadedfile.UploadedFile) error { for _, processor := range this { err :=
processor.Process(image) if err != nil { return fmt.Errorf("Error multiprocessing on %s: %s", processor.String(), err.Error()) } } return nil } func (this multiProcessType) String() string { processes := make([]string, 0) for _, p := range this { processes = append(processes, p.String()) } return "Multiple processes <" + strings.Join(processes, ", ") + ">" } type asyncProcessType []ProcessType func (this asyncProcessType) Process(image *uploadedfile.UploadedFile) error { errs := make(chan error, len(this)) for _, processor := range this { go func(p ProcessType) { err := p.Process(image) if err != nil { errs <- fmt.Errorf("Error asynchronously processing on %s: %s", p.String(), err.Error()) } else { errs <- nil } }(processor) } for i := 0; i < len(this); i++ { select { case err := <-errs: if err != nil { return err } } } return nil } func (this asyncProcessType) String() string { processes := make([]string, 0) for _, p := range this { processes = append(processes, p.String()) } return "Async processes <" + strings.Join(processes, ", ") + ">" } type ImageProcessor struct { processor ProcessType } func (this *ImageProcessor) Run(image *uploadedfile.UploadedFile) error { return this.processor.Process(image) } type ImageProcessorStrategy func(*config.Configuration, *uploadedfile.UploadedFile) (*ImageProcessor, error) // Just do nothing to the file after it's uploaded... var PassthroughStrategy = func(cfg *config.Configuration, file *uploadedfile.UploadedFile) (*ImageProcessor, error) { return &ImageProcessor{multiProcessType{}}, nil } var ThumbnailStrategy = func(cfg *config.Configuration, file *uploadedfile.UploadedFile) (*ImageProcessor, error) { processor := asyncProcessType{} for _, t := range file.GetThumbs() { processor = append(processor, t) } return &ImageProcessor{processor}, nil } var EverythingStrategy = func(cfg *config.Configuration, file *uploadedfile.UploadedFile) (*ImageProcessor, error) { size, err := file.FileSize() if err != nil { return &ImageProcessor{}, err } processor := multiProcessType{} processor = append(processor, &ImageOrienter{}) processor = append(processor, &CompressLosslessly{}) processor = append(processor, &ExifStripper{}) if size > cfg.MaxFileSize { processor = append(processor, &ImageScaler{cfg.MaxFileSize}) } async := asyncProcessType{} async = append(async, DuelOCRStratagy()) for _, t := range file.GetThumbs() { async = append(async, t) } if len(async) > 0 { processor = append(processor, async) } return &ImageProcessor{processor}, nil } ================================================ FILE: imageprocessor/imagescaler.go ================================================ package imageprocessor import ( "errors" "github.com/Imgur/mandible/imageprocessor/processorcommand" "github.com/Imgur/mandible/uploadedfile" ) type ImageScaler struct { targetSize int64 } func (this *ImageScaler) Process(image *uploadedfile.UploadedFile) error { switch image.GetMime() { case "image/jpeg", "image/jpg": return this.scaleJpeg(image) case "image/png": return this.scalePng(image) case "image/gif": return this.scaleGif(image) } return errors.New("Unsuported filetype") } func (this *ImageScaler) String() string { return "Image scaler" } func (this *ImageScaler) scalePng(image *uploadedfile.UploadedFile) error { filename, err := processorcommand.ConvertToJpeg(image.GetPath()) if err != nil { return err } image.SetPath(filename) image.SetMime("image/jpeg") return this.scaleJpeg(image) } func (this *ImageScaler) scaleJpeg(image *uploadedfile.UploadedFile) error { filename, err := 
processorcommand.Quality(image.GetPath(), 90) if err != nil { return err } image.SetPath(filename) size, err := image.FileSize() if size < this.targetSize { return nil } filename, err = processorcommand.Quality(image.GetPath(), 70) if err != nil { return err } image.SetPath(filename) size, err = image.FileSize() if size < this.targetSize { return nil } percent := 90 if (size - this.targetSize) >= (15 * 1024 * 1024) { percent = 30 } else if (size - this.targetSize) >= (10 * 1024 * 1024) { percent = 40 } else if (size - this.targetSize) >= (5 * 1024 * 1024) { percent = 60 } for { filename, err = processorcommand.ResizePercent(image.GetPath(), percent) if err != nil { return err } image.SetPath(filename) size, err := image.FileSize() if err != nil { return err } else if size == 0 || percent < 10 { return errors.New("Could not scale image to desired filesize") } else if size < this.targetSize { return nil } percent -= 10 } } func (this *ImageScaler) scaleGif(image *uploadedfile.UploadedFile) error { return errors.New("Unimplimented") } ================================================ FILE: imageprocessor/ocr.go ================================================ package imageprocessor import ( "github.com/Imgur/mandible/imageprocessor/processorcommand" "github.com/Imgur/mandible/uploadedfile" "log" ) type OCRRunner struct { Command processorcommand.OCRCommand } func (this *OCRRunner) Process(image *uploadedfile.UploadedFile) error { result, err := this.Command.Run(image.GetPath()) if err != nil { log.Printf("Error running OCR: %s", err.Error()) return err } image.SetOCRText(result.Text) return nil } func (this *OCRRunner) String() string { return "OCR runner" } var DuelOCRStratagy = func() *OCRRunner { multi := processorcommand.MultiOCRCommand{} multi = append(multi, processorcommand.NewMemeOCR()) multi = append(multi, processorcommand.NewStandardOCR()) return &OCRRunner{multi} } var StandardOCRStratagy = func() *OCRRunner { return &OCRRunner{processorcommand.NewStandardOCR()} } var MemeOCRStratagy = func() *OCRRunner { return &OCRRunner{processorcommand.NewMemeOCR()} } ================================================ FILE: imageprocessor/ocr_test.go ================================================ package imageprocessor import ( "errors" "io" "io/ioutil" "os" "testing" "github.com/Imgur/mandible/uploadedfile" ) func TestStandardOCR(t *testing.T) { image, err := getUploadedFileObject() if err != nil { t.Fatalf("Could not initialize standard OCR test") } defer image.Clean() ocrStratagy := StandardOCRStratagy() ocrStratagy.Process(image) if image.GetOCRText() != "hello" { t.Fatalf("Did not get proper standard OCR text back %s != hello", image.GetOCRText()) } } func getUploadedFileObject() (*uploadedfile.UploadedFile, error) { filename, err := copyTestImage("testdata/ocrtestimage.png") if err != nil { return nil, err } image, err := uploadedfile.NewUploadedFile("ocrtestimage.png", filename, nil) if err != nil { return nil, errors.New("Could not initialize standard OCR test") } return image, nil } func copyTestImage(filename string) (string, error) { uploadFile, err := os.Open(filename) if err != nil { return "", err } defer uploadFile.Close() tmpFile, err := ioutil.TempFile(os.TempDir(), "image") if err != nil { return "", errors.New("Unable to write to /tmp") } defer tmpFile.Close() _, err = io.Copy(tmpFile, uploadFile) if err != nil { return "", err } return tmpFile.Name(), nil } ================================================ FILE: imageprocessor/processorcommand/gm.go 
================================================ package processorcommand import ( "fmt" "github.com/Imgur/mandible/imageprocessor/thumbType" ) const GM_COMMAND = "gm" func ConvertToJpeg(filename string) (string, error) { outfile := fmt.Sprintf("%s_jpg", filename) args := []string{ "convert", filename, "-flatten", "JPEG:" + outfile, } err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func FixOrientation(filename string) (string, error) { outfile := fmt.Sprintf("%s_ort", filename) args := []string{ "convert", filename, "-auto-orient", outfile, } err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func Quality(filename string, quality int) (string, error) { outfile := fmt.Sprintf("%s_q", filename) args := []string{ "convert", filename, "-quality", fmt.Sprintf("%d", quality), "-density", "72x72", outfile, } err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func ResizePercent(filename string, percent int) (string, error) { outfile := fmt.Sprintf("%s_rp", filename) args := []string{ "convert", filename, "-resize", fmt.Sprintf("%d%%", percent), outfile, } err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func SquareThumb(filename, name string, size int, quality int, format thumbType.ThumbType) (string, error) { outfile := fmt.Sprintf("%s_%s", filename, name) args := []string{ "convert", fmt.Sprintf("%s[0]", filename), "-resize", fmt.Sprintf("%dx%d^", size, size), "-gravity", "center", "-crop", fmt.Sprintf("%dx%d+0+0", size, size), "-density", "72x72", "-unsharp", "0.5", } if quality >= 0 { args = append(args, "-quality", fmt.Sprintf("%d", quality), ) } args = append(args, fmt.Sprintf("%s:%s", format.ToString(), outfile)) err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func Thumb(filename, name string, width, height int, quality int, format thumbType.ThumbType) (string, error) { outfile := fmt.Sprintf("%s_%s", filename, name) args := []string{ "convert", fmt.Sprintf("%s[0]", filename), "-resize", fmt.Sprintf("%dx%d>", width, height), "-density", "72x72", } if quality >= 0 { args = append(args, "-quality", fmt.Sprintf("%d", quality), ) } args = append(args, fmt.Sprintf("%s:%s", format.ToString(), outfile)) err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func CircleThumb(filename, name string, width int, quality int, format thumbType.ThumbType) (string, error) { outfile := fmt.Sprintf("%s_%s", filename, name) filename, err := SquareThumb(filename, name, width, quality, format) if err != nil { return "", err } args := []string{ "convert", "-size", fmt.Sprintf("%dx%d", width, width), "xc:none", "-fill", filename, "-quality", "83", "-density", "72x72", "-draw", fmt.Sprintf("circle %d,%d %d,1", width/2, width/2, width/2), } if quality >= 0 { args = append(args, "-quality", fmt.Sprintf("%d", quality), ) } args = append(args, fmt.Sprintf("PNG:%s", outfile)) err = runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func CustomThumb(filename, name string, width, height int, cropGravity string, cropWidth, cropHeight, quality int, format thumbType.ThumbType) (string, error) { outfile := fmt.Sprintf("%s_%s", filename, name) args := []string{ "convert", fmt.Sprintf("%s[0]", filename), "-resize", fmt.Sprintf("%dx%d^", width, height), "-density", "72x72", } if quality != -1 { 
args = append(args, "-quality", fmt.Sprintf("%d", quality), ) } if cropGravity != "" { args = append(args, "-gravity", fmt.Sprintf("%s", cropGravity), "-crop", fmt.Sprintf("%dx%d+0+0", cropWidth, cropHeight), ) } args = append(args, fmt.Sprintf("%s:%s", format.ToString(), outfile)) err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } func Full(filename string, name string, quality int, format thumbType.ThumbType) (string, error) { outfile := fmt.Sprintf("%s_%s", filename, name) args := []string{ "convert", fmt.Sprintf("%s[0]", filename), "-density", "72x72", } if quality >= 0 { args = append(args, "-quality", fmt.Sprintf("%d", quality), ) } args = append(args, fmt.Sprintf("%s:%s", format.ToString(), outfile)) err := runProcessorCommand(GM_COMMAND, args) if err != nil { return "", err } return outfile, nil } ================================================ FILE: imageprocessor/processorcommand/jpegtran.go ================================================ package processorcommand import ( "fmt" ) func Jpegtran(filename string) (string, error) { outfile := fmt.Sprintf("%s_opti", filename) args := []string{ "-copy", "all", "-optimize", "-outfile", outfile, filename, } err := runProcessorCommand("jpegtran", args) if err != nil { return "", err } return outfile, nil } ================================================ FILE: imageprocessor/processorcommand/ocrcommands.go ================================================ package processorcommand import ( "errors" "fmt" "io/ioutil" "os" "regexp" "strings" "github.com/trustmaster/go-aspell" ) type OCRResult struct { Type string Text string } func newOCRResult(ocrType string, result string) *OCRResult { return &OCRResult{ ocrType, result, } } func (this *OCRResult) removeNonWords() { blob := this.Text speller, err := aspell.NewSpeller(map[string]string{ "lang": "en_US", }) if err != nil { fmt.Printf("Error: %s", err.Error()) return } defer speller.Delete() singleCharWords := regexp.MustCompile("(a|i)") numberRegex := regexp.MustCompile("\\d{3,}") wordRegexp := regexp.MustCompile("\\b(\\w+)\\b") words := wordRegexp.FindAllString(blob, -1) str := "" for _, word := range words { if numberRegex.MatchString(word) { str += " " + word } else if len(word) == 1 { if singleCharWords.MatchString(word) { str += " " + word } } else if speller.Check(word) { str += " " + word } } this.Text = strings.TrimSpace(str) } func (this *OCRResult) wordCount(blob string) int { word_regexp := regexp.MustCompile("\\b(\\w+)\\b") words := word_regexp.FindAllString(blob, -1) // don't let single char words count towards the overal word count. Gets thrown off by poor OCR results count := 0 for _, word := range words { if len(word) > 1 { count++ } } return count } type MultiOCRCommand []OCRCommand func (this MultiOCRCommand) Run(image string) (*OCRResult, error) { results := make(chan *OCRResult, len(this)) errs := make(chan error, len(this)) for _, command := range this { go func(c OCRCommand) { k, err := c.Run(image) if err != nil { errs <- err return } results <- k }(command) } max := -1 var best *OCRResult for i := 0; i < len(this); i++ { select { case result := <-results: result.removeNonWords() count := result.wordCount(result.Text) if count > max { best = result max = count } case err := <-errs: return nil, err } } // Return the average, same as before. 
return best, nil } type OCRCommand interface { Run(image string) (*OCRResult, error) } type MemeOCR struct { name string } func NewMemeOCR() *MemeOCR { return &MemeOCR{ "MemeOCR", } } func (this *MemeOCR) Run(image string) (*OCRResult, error) { imageTif := fmt.Sprintf("%s_meme.jpg", image) outText := fmt.Sprintf("%s_meme", image) inImage := fmt.Sprintf("%s[0]", image) preprocessingArgs := []string{"convert", inImage, "-resize", "400%", "-fill", "black", "-fuzz", "10%", "+matte", "-matte", "-transparent", "white", imageTif} tesseractArgs := []string{"-l", "meme", imageTif, outText} err := runProcessorCommand(GM_COMMAND, preprocessingArgs) if err != nil { return nil, errors.New(fmt.Sprintf("Meme preprocessing command failed with error = %v", err)) } defer os.Remove(imageTif) err = runProcessorCommand("tesseract", tesseractArgs) if err != nil { return nil, errors.New(fmt.Sprintf("Meme tesseract command failed with error = %v", err)) } defer os.Remove(outText + ".txt") text, err := ioutil.ReadFile(outText + ".txt") if err != nil { return nil, err } result := strings.ToLower(strings.TrimSpace(string(text[:]))) return newOCRResult(this.name, result), nil } type StandardOCR struct { name string } func NewStandardOCR() *StandardOCR { return &StandardOCR{ "StandardOCR", } } func (this *StandardOCR) Run(image string) (*OCRResult, error) { imageTif := fmt.Sprintf("%s_standard.jpg", image) outText := fmt.Sprintf("%s_standard", image) inImage := fmt.Sprintf("%s[0]", image) preprocessingArgs := []string{"convert", inImage, "-resize", "400%", "-type", "Grayscale", imageTif} tesseractArgs := []string{"-l", "eng", imageTif, outText} err := runProcessorCommand(GM_COMMAND, preprocessingArgs) if err != nil { return nil, errors.New(fmt.Sprintf("Standard preprocessing command failed with error = %v", err)) } defer os.Remove(imageTif) err = runProcessorCommand("tesseract", tesseractArgs) if err != nil { return nil, errors.New(fmt.Sprintf("Standard tesseract command failed with error = %v", err)) } defer os.Remove(outText + ".txt") text, err := ioutil.ReadFile(outText + ".txt") if err != nil { return nil, err } result := strings.ToLower(strings.TrimSpace(string(text[:]))) return newOCRResult(this.name, result), nil } ================================================ FILE: imageprocessor/processorcommand/optipng.go ================================================ package processorcommand import ( "fmt" ) func Optipng(filename string) (string, error) { outfile := fmt.Sprintf("%s_opi", filename) args := []string{ "-fix", "-out", outfile, filename, } err := runProcessorCommand("optipng", args) if err != nil { return "", err } return outfile, nil } ================================================ FILE: imageprocessor/processorcommand/runner.go ================================================ package processorcommand import ( "bytes" "errors" "log" "os/exec" "time" ) func runProcessorCommand(command string, args []string) error { cmd := exec.Command(command, args...) 
var out bytes.Buffer var stderr bytes.Buffer cmd.Stdout = &out cmd.Stderr = &stderr cmd.Start() cmdDone := make(chan error, 1) go func() { cmdDone <- cmd.Wait() }() select { case <-time.After(time.Duration(60) * time.Second): killCmd(cmd) <-cmdDone return errors.New("Command timed out") case err := <-cmdDone: if err != nil { log.Println(stderr.String()) } return err } } func killCmd(cmd *exec.Cmd) { if err := cmd.Process.Kill(); err != nil { log.Printf("Failed to kill command: %v", err) } } ================================================ FILE: imageprocessor/processorcommand/stripmetadata.go ================================================ package processorcommand func StripMetadata(filename string) error { args := []string{ "-all=", "--icc_profile:all", "-overwrite_original", filename, } err := runProcessorCommand("exiftool", args) if err != nil { return err } return nil } ================================================ FILE: imageprocessor/thumbType/thumbType.go ================================================ package thumbType type ThumbType int const ( UNKNOWN ThumbType = iota JPG PNG GIF WEBP ) func (this ThumbType) ToString() string { switch this { case JPG: return "JPG" case PNG: return "PNG" case GIF: return "GIF" case WEBP: return "WEBP" default: return "UNKNOWN" } } func FromMime(mime string) ThumbType { switch mime { case "image/jpeg": return JPG case "image/png": return PNG case "image/gif": return GIF case "image/webp": return WEBP default: return UNKNOWN } } func FromString(format string) ThumbType { switch format { case "jpg": return JPG case "jpeg": return JPG case "png": return PNG case "gif": return GIF case "webp": return WEBP default: return UNKNOWN } } ================================================ FILE: imagestore/factory.go ================================================ package imagestore import ( "io/ioutil" "log" "github.com/Imgur/mandible/config" "github.com/mitchellh/goamz/aws" "github.com/mitchellh/goamz/s3" "golang.org/x/oauth2" "golang.org/x/oauth2/google" gcloud "google.golang.org/cloud" gcs "google.golang.org/cloud/storage" ) type Factory struct { conf *config.Configuration } func NewFactory(conf *config.Configuration) *Factory { return &Factory{conf} } func (this *Factory) NewImageStores() ImageStore { stores := MultiImageStore{} var store ImageStore for _, configWrapper := range this.conf.Stores { switch configWrapper["Type"] { case "s3": store = this.NewS3ImageStore(configWrapper) stores = append(stores, store) case "gcs": store = this.NewGCSImageStore(configWrapper) stores = append(stores, store) case "local": store = this.NewLocalImageStore(configWrapper) stores = append(stores, store) case "memory": store = NewInMemoryImageStore() stores = append(stores, store) default: log.Fatalf("Unsupported store %s", configWrapper["Type"]) } } if len(this.conf.Stores) == 1 { return store } // return a MultiImageStore type if more then 1 store was specified in the config return stores } func (this *Factory) NewS3ImageStore(conf map[string]string) ImageStore { bucket := conf["BucketName"] auth, err := aws.GetAuth(conf["AWSKey"], conf["AWSSecret"]) if err != nil { log.Fatal(err) } client := s3.New(auth, aws.Regions[conf["Region"]]) mapper := NewNamePathMapper(conf["NamePathRegex"], conf["NamePathMap"]) return NewS3ImageStore( bucket, conf["StoreRoot"], client, mapper, ) } func (this *Factory) NewGCSImageStore(conf map[string]string) ImageStore { jsonKey, err := ioutil.ReadFile(conf["KeyFile"]) if err != nil { log.Fatal(err) } cloudConf, err := 
google.JWTConfigFromJSON( jsonKey, gcs.ScopeFullControl, ) if err != nil { log.Fatal(err) } bucket := conf["BucketName"] ctx := gcloud.NewContext(conf["AppID"], cloudConf.Client(oauth2.NoContext)) mapper := NewNamePathMapper(conf["NamePathRegex"], conf["NamePathMap"]) return NewGCSImageStore( ctx, bucket, conf["StoreRoot"], mapper, ) } func (this *Factory) NewLocalImageStore(conf map[string]string) ImageStore { mapper := NewNamePathMapper(conf["NamePathRegex"], conf["NamePathMap"]) return NewLocalImageStore(conf["StoreRoot"], mapper) } func (this *Factory) NewStoreObject(id string, mime string, size string) *StoreObject { return &StoreObject{ Id: id, MimeType: mime, Size: size, } } func (this *Factory) NewHashGenerator(store ImageStore) *HashGenerator { hashGen := &HashGenerator{ make(chan string), this.conf.HashLength, store, } hashGen.init() return hashGen } ================================================ FILE: imagestore/gcsstore.go ================================================ package imagestore import ( "io" "io/ioutil" "log" "os" "golang.org/x/net/context" "google.golang.org/cloud/storage" ) type GCSImageStore struct { ctx context.Context bucketName string storeRoot string namePathMapper *NamePathMapper } func NewGCSImageStore(ctx context.Context, bucket string, root string, mapper *NamePathMapper) *GCSImageStore { return &GCSImageStore{ ctx: ctx, bucketName: bucket, storeRoot: root, namePathMapper: mapper, } } func (this *GCSImageStore) Exists(obj *StoreObject) (bool, error) { _, err := storage.StatObject(this.ctx, this.bucketName, this.toPath(obj)) if err != nil { return false, err } return true, nil } func (this *GCSImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) { srcFd, err := os.Open(src) if err != nil { return nil, err } defer srcFd.Close() data, err := ioutil.ReadAll(srcFd) if err != nil { log.Printf("error on read file: %s", err) return nil, err } wc := storage.NewWriter(this.ctx, this.bucketName, this.toPath(obj)) wc.ContentType = obj.MimeType if _, err := wc.Write(data); err != nil { log.Printf("error on write data: %s", err) return nil, err } if err := wc.Close(); err != nil { log.Printf("error on close writer: %s", err) return nil, err } obj.Url = "https://storage.googleapis.com/" + this.bucketName + "/" + this.toPath(obj) return obj, nil } func (this *GCSImageStore) Get(obj *StoreObject) (io.ReadCloser, error) { reader, err := storage.NewReader(this.ctx, this.bucketName, this.toPath(obj)) if err != nil { log.Printf("error on read file: %s", err) return nil, err } return reader, nil } func (this *GCSImageStore) String() string { return "GCSStore" } func (this *GCSImageStore) toPath(obj *StoreObject) string { if this.storeRoot != "" { return this.storeRoot + "/" + this.namePathMapper.mapToPath(obj) } return this.namePathMapper.mapToPath(obj) } ================================================ FILE: imagestore/hash.go ================================================ package imagestore import ( "crypto/rand" "log" ) // Provides a continuous stream of random image "hashes" of a fixed length that is unique (does not exist in the store). 
type HashGenerator struct { hashGetter chan string length int store ImageStore } func (this *HashGenerator) init() { go func() { storeObj := &StoreObject{ "", "", "original", "", } for { str := "" for len(str) < this.length { c := 10 bArr := make([]byte, c) _, err := rand.Read(bArr) if err != nil { log.Println("error:", err) break } for _, b := range bArr { if len(str) == this.length { break } /** * Each byte will be in [0, 256), but we only care about: * * [48, 57] 0-9 * [65, 90] A-Z * [97, 122] a-z * * Which means that the highest bit will always be zero, since the last byte with high bit * zero is 01111111 = 127 which is higher than 122. Lower our odds of having to re-roll a byte by * dividing by two (right bit shift of 1). */ b = b >> 1 // The byte is any of 0-9 A-Z a-z byteIsAllowable := (b >= 48 && b <= 57) || (b >= 65 && b <= 90) || (b >= 97 && b <= 122) if byteIsAllowable { str += string(b) } } } storeObj.Id = str exists, _ := this.store.Exists(storeObj) if !exists { this.hashGetter <- str } } }() } func (this *HashGenerator) Get() string { return <-this.hashGetter } ================================================ FILE: imagestore/localstore.go ================================================ package imagestore import ( "io" "os" "path" ) // A LocalImageStore stores images on the local disk. type LocalImageStore struct { storeRoot string namePathMapper *NamePathMapper } func NewLocalImageStore(root string, mapper *NamePathMapper) *LocalImageStore { return &LocalImageStore{ storeRoot: root, namePathMapper: mapper, } } func (this *LocalImageStore) Exists(obj *StoreObject) (bool, error) { if _, err := os.Stat(this.toPath(obj)); os.IsNotExist(err) { return false, err } return true, nil } func (this *LocalImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) { srcFd, err := os.Open(src) if err != nil { return nil, err } defer srcFd.Close() // open output file this.createParent(obj) fo, err := os.Create(this.toPath(obj)) if err != nil { return nil, err } defer fo.Close() _, err = io.Copy(fo, srcFd) if err != nil { return nil, err } obj.Url = this.toPath(obj) return obj, nil } func (this *LocalImageStore) Get(obj *StoreObject) (io.ReadCloser, error) { reader, err := os.Open(this.toPath(obj)) if err != nil { return nil, err } return reader, nil } func (this *LocalImageStore) String() string { return "LocalStore" } func (this *LocalImageStore) createParent(obj *StoreObject) { path := path.Dir(this.toPath(obj)) if _, err := os.Stat(path); os.IsNotExist(err) { os.MkdirAll(path, 0777) } } func (this *LocalImageStore) toPath(obj *StoreObject) string { return this.storeRoot + "/" + this.namePathMapper.mapToPath(obj) } ================================================ FILE: imagestore/memorystore.go ================================================ package imagestore import ( "errors" "io" "io/ioutil" "os" "strings" "sync" ) type InMemoryImageStore struct { files map[string]string // name -> contents rw sync.Mutex } func NewInMemoryImageStore() *InMemoryImageStore { return &InMemoryImageStore{ files: make(map[string]string), rw: sync.Mutex{}, } } func (this *InMemoryImageStore) Exists(obj *StoreObject) (bool, error) { this.rw.Lock() _, ok := this.files[obj.Id] this.rw.Unlock() return ok, nil } func (this *InMemoryImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) { srcFd, err := os.Open(src) if err != nil { return nil, err } defer srcFd.Close() data, err := ioutil.ReadAll(srcFd) if err != nil { return nil, err } this.rw.Lock() this.files[obj.Id] = string(data) 
this.rw.Unlock() return obj, nil } func (this *InMemoryImageStore) Get(obj *StoreObject) (io.ReadCloser, error) { this.rw.Lock() data, ok := this.files[obj.Id] this.rw.Unlock() if !ok { return nil, errors.New("File doesn't exist") } reader := strings.NewReader(data) readCloser := ioutil.NopCloser(reader) return readCloser, nil } func (this *InMemoryImageStore) String() string { return "InMemoryStore" } ================================================ FILE: imagestore/namepathmapper.go ================================================ package imagestore import ( "regexp" "strings" ) type NamePathMapper struct { regex *regexp.Regexp replace string } func NewNamePathMapper(expr string, mapping string) *NamePathMapper { var r *regexp.Regexp if len(expr) > 0 { r = regexp.MustCompile(expr) } return &NamePathMapper{ r, mapping, } } func (this *NamePathMapper) mapToPath(obj *StoreObject) string { repl := strings.Replace(this.replace, "${ImageName}", obj.Id, -1) repl = strings.Replace(repl, "${ImageSize}", obj.Size, -1) if this.regex != nil { return this.regex.ReplaceAllString(obj.Id, repl) } return repl } ================================================ FILE: imagestore/s3store.go ================================================ package imagestore import ( "io" "os" "github.com/mitchellh/goamz/s3" ) type S3ImageStore struct { bucketName string storeRoot string client *s3.S3 namePathMapper *NamePathMapper } func NewS3ImageStore(bucket string, root string, client *s3.S3, mapper *NamePathMapper) *S3ImageStore { return &S3ImageStore{ bucketName: bucket, storeRoot: root, client: client, namePathMapper: mapper, } } func (this *S3ImageStore) Exists(obj *StoreObject) (bool, error) { bucket := this.client.Bucket(this.bucketName) response, err := bucket.Head(this.toPath(obj)) if err != nil { return false, err } return (response.StatusCode == 200), nil } func (this *S3ImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) { srcFd, err := os.Open(src) if err != nil { return nil, err } defer srcFd.Close() bucket := this.client.Bucket(this.bucketName) stats, err := srcFd.Stat() if err != nil { return nil, err } err = bucket.PutReader(this.toPath(obj), srcFd, stats.Size(), obj.MimeType, s3.BucketOwnerFull) if err != nil { return nil, err } obj.Url = bucket.URL(this.toPath(obj)) return obj, nil } func (this *S3ImageStore) Get(obj *StoreObject) (io.ReadCloser, error) { bucket := this.client.Bucket(this.bucketName) data, err := bucket.GetReader(this.toPath(obj)) if err != nil { return nil, err } return data, nil } func (this *S3ImageStore) String() string { return "S3Store" } func (this *S3ImageStore) toPath(obj *StoreObject) string { return this.storeRoot + "/" + this.namePathMapper.mapToPath(obj) } ================================================ FILE: imagestore/store.go ================================================ package imagestore import ( "fmt" "io" ) type ImageStore interface { Save(src string, obj *StoreObject) (*StoreObject, error) Exists(obj *StoreObject) (bool, error) Get(obj *StoreObject) (io.ReadCloser, error) String() string } type MultiImageStore []ImageStore func (this MultiImageStore) Save(src string, obj *StoreObject) (*StoreObject, error) { errs := make(chan error, len(this)) for _, store := range this { go func(s ImageStore) { _, err := s.Save(src, obj) if err != nil { errs <- fmt.Errorf("Error asynchronously saving image on %s: %s", s.String(), err.Error()) } else { errs <- nil } }(store) } for i := 0; i < len(this); i++ { select { case err := <-errs: if err != nil { return 
nil, err } } } return obj, nil } func (this MultiImageStore) Exists(obj *StoreObject) (bool, error) { errs := make(chan error, len(this)) results := make(chan bool, len(this)) for _, store := range this { go func(s ImageStore) { r, err := s.Exists(obj) if err != nil { errs <- fmt.Errorf("Error asynchronously proving existance for image on %s: %s", s.String(), err.Error()) } else { results <- r } }(store) } for i := 0; i < len(this); i++ { select { case err := <-errs: if err != nil { return false, err } case r := <-results: if r == true { return true, nil } } } return false, nil } func (this MultiImageStore) Get(obj *StoreObject) (io.ReadCloser, error) { errs := make(chan error, len(this)) results := make(chan io.ReadCloser, 1) done := make(chan bool, 1) for _, store := range this { go func(s ImageStore) { r, err := s.Get(obj) if err != nil { errs <- fmt.Errorf("Error asynchronously getting image on %s: %s", s.String(), err.Error()) } else { select { case done <- true: results <- r default: r.Close() } } }(store) } var err error for i := 0; i < len(this); i++ { select { case r := <-results: return r, nil case err = <-errs: } } return nil, err } func (this MultiImageStore) String() string { str := "" for _, store := range this { str += store.String() str += " " } return str } ================================================ FILE: imagestore/storeobject.go ================================================ package imagestore type StorableObject interface { GetPath() string } type StoreObject struct { Id string // Unique identifier MimeType string // i.e. image/jpg Size string // i.e. thumb Url string // if publicly available } func (this *StoreObject) Store(s StorableObject, store ImageStore) error { path := s.GetPath() obj, err := store.Save(path, this) if err != nil { return err } this.Url = obj.Url return nil } ================================================ FILE: main.go ================================================ package main import ( "fmt" "log" "net/http" "os" mandibleConf "github.com/Imgur/mandible/config" processors "github.com/Imgur/mandible/imageprocessor" mandible "github.com/Imgur/mandible/server" ) func main() { configFile := os.Getenv("MANDIBLE_CONF") config := mandibleConf.NewConfiguration(configFile) var server *mandible.Server var stats mandible.RuntimeStats if config.DatadogEnabled { var err error stats, err = mandible.NewDatadogStats(config.DatadogHostname) if err != nil { log.Printf("Invalid Datadog Hostname: %s", config.DatadogHostname) os.Exit(1) } log.Println("Stats init success") } else { stats = &mandible.DiscardStats{} } if os.Getenv("AUTHENTICATION_HMAC_KEY") != "" { key := []byte(os.Getenv("AUTHENTICATION_HMAC_KEY")) auth := mandible.NewHMACAuthenticatorSHA256(key) server = mandible.NewAuthenticatedServer(config, processors.EverythingStrategy, auth, stats) } else { server = mandible.NewServer(config, processors.EverythingStrategy, stats) } muxer := http.NewServeMux() server.Configure(muxer) port := fmt.Sprintf(":%d", server.Config.Port) log.Printf("Listening on Port: %s", port) stats.LogStartup() http.ListenAndServe(port, muxer) } ================================================ FILE: server/authenticator.go ================================================ package server import ( "crypto/hmac" "crypto/sha256" "encoding/base64" "encoding/json" "errors" "hash" "net/http" "time" ) var ( ErrNoAuthentication = errors.New("No authentication scheme was configured.") ErrEmptyAuth = errors.New("Empty or missing authentication header.") ErrNoGrantTime = errors.New("No 
grant time specified in the authentication grant.") ErrExpiredGrant = errors.New("The authentication grant has expired.") ErrMACMismatch = errors.New("The provided message authentication code is invalid for the given message.") ) type AuthenticatedUser struct { UserID string `json:"user_id"` GrantTime time.Time `json:"grant_time"` GrantDurationSeconds int64 `json:"grant_duration_sec"` } type Authenticator interface { GetUser(*http.Request) (*AuthenticatedUser, error) } type PassthroughAuthenticator struct{} func (auth *PassthroughAuthenticator) GetUser(req *http.Request) (*AuthenticatedUser, error) { return nil, ErrNoAuthentication } type HMACAuthenticator struct { key []byte h func() hash.Hash now time.Time } func (auth *HMACAuthenticator) SetTime(t time.Time) { auth.now = t } func NewHMACAuthenticatorSHA256(key []byte) *HMACAuthenticator { return &HMACAuthenticator{ key: key, h: sha256.New, } } func (auth *HMACAuthenticator) GetUser(req *http.Request) (*AuthenticatedUser, error) { authHeader := []byte(req.Header.Get("Authorization")) userProvidedHmacBase64 := req.Header.Get("X-Authorization-HMAC") if len(authHeader) == 0 || userProvidedHmacBase64 == "" { return nil, ErrEmptyAuth } userProvidedHmac, _ := base64.StdEncoding.DecodeString(userProvidedHmacBase64) macWriter := hmac.New(auth.h, auth.key) macWriter.Write(authHeader) expectedMac := macWriter.Sum(nil) if hmac.Equal(expectedMac, userProvidedHmac) { var authUser AuthenticatedUser err := json.Unmarshal(authHeader, &authUser) // Valid JSON but no shared values will unmarshal to the zero valued authenticated user; only pass back // a non-zero-valued authenticated user if err == nil && authUser.UserID != "" { if authUser.GrantTime.IsZero() { return nil, ErrNoGrantTime } else if authUser.GrantTime.Add(time.Duration(authUser.GrantDurationSeconds) * time.Second).Before(auth.now) { return nil, ErrExpiredGrant } else { return &authUser, nil } } } return nil, ErrMACMismatch } ================================================ FILE: server/authenticator_test.go ================================================ package server import ( "crypto/hmac" "crypto/sha256" "encoding/base64" "encoding/json" "net/http" "testing" "time" ) func TestPassthroughAuthenticatorAlwaysReturnsNilUser(t *testing.T) { req, _ := http.NewRequest("POST", "http://127.0.0.1/user/123/url", nil) authenticator := &PassthroughAuthenticator{} user, err := authenticator.GetUser(req) if user != nil { t.Fatalf("Expected authenticator of the passthrough authenticator to be nil, instead %+v", user) } if err != ErrNoAuthentication { t.Fatalf("Unexpected error: %s", err.Error()) } } func TestHMACAuthenticatorOnValidRequest(t *testing.T) { message := AuthenticatedUser{ UserID: "123", GrantTime: time.Now(), GrantDurationSeconds: 365 * 24 * 3600, } messageBytes, _ := json.Marshal(&message) messageMacWriter := hmac.New(sha256.New, []byte("foobar")) messageMacWriter.Write(messageBytes) messageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil)) req, _ := http.NewRequest("POST", "http://127.0.0.1/user/123/url", nil) req.Header.Set("Authorization", string(messageBytes)) req.Header.Set("X-Authorization-HMAC", string(messageMac)) authenticator := NewHMACAuthenticatorSHA256([]byte("foobar")) authenticator.SetTime(time.Now()) user, err := authenticator.GetUser(req) if user == nil { t.Fatalf("Expected authenticator of of a valid response to not return nil") } if err != nil { t.Fatalf("Unexpected error: %s", err.Error()) } } func TestHMACAuthenticatorOnEmptyHeader(t *testing.T) { req, 
_ := http.NewRequest("POST", "http://127.0.0.1/user/123/url", nil) req.Header.Set("Authorization", "") authenticator := NewHMACAuthenticatorSHA256([]byte("foobar")) user, err := authenticator.GetUser(req) if user != nil { t.Fatalf("Expected authenticator with no auth response to return nil") } if err != ErrEmptyAuth { t.Fatalf("Unexpected error: %s", err.Error()) } } func TestHMACAuthenticatorOnInvalidRequest(t *testing.T) { message := AuthenticatedUser{ UserID: "123", GrantTime: time.Now(), GrantDurationSeconds: 365 * 24 * 3600, } messageBytes, _ := json.Marshal(&message) // wrong key! messageMacWriter := hmac.New(sha256.New, []byte("jklfdsjklfsdjklfdsjklfsdjklfsd")) messageMacWriter.Write(messageBytes) messageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil)) req, _ := http.NewRequest("POST", "http://127.0.0.1/user/123/url", nil) req.Header.Set("Authorization", string(messageBytes)) req.Header.Set("X-Authorization-HMAC", string(messageMac)) authenticator := NewHMACAuthenticatorSHA256([]byte("foobar")) authenticator.SetTime(time.Now()) user, err := authenticator.GetUser(req) if user != nil { t.Fatalf("Expected authenticator of of an invalid response to return nil") } if err != ErrMACMismatch { t.Fatalf("Unexpected error: %s", err.Error()) } } func TestHMACAuthenticatorOnExpiredGrant(t *testing.T) { grantedTime := time.Now() requestTime := time.Now().Add(time.Hour) message := AuthenticatedUser{ UserID: "123", GrantTime: grantedTime, GrantDurationSeconds: 5, } messageBytes, _ := json.Marshal(&message) messageMacWriter := hmac.New(sha256.New, []byte("foobar")) messageMacWriter.Write(messageBytes) messageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil)) req, _ := http.NewRequest("POST", "http://127.0.0.1/user/123/url", nil) req.Header.Set("Authorization", string(messageBytes)) req.Header.Set("X-Authorization-HMAC", string(messageMac)) authenticator := NewHMACAuthenticatorSHA256([]byte("foobar")) authenticator.SetTime(requestTime) user, err := authenticator.GetUser(req) if user != nil { t.Fatalf("Expected authenticator of of an invalid response to return nil") } if err != ErrExpiredGrant { t.Fatalf("Unexpected error: %s", err.Error()) } } ================================================ FILE: server/server.go ================================================ package server import ( "encoding/base64" "encoding/json" "errors" "fmt" "io" "io/ioutil" "log" "net/http" "os" "path" "strings" "time" "github.com/gorilla/mux" "github.com/Imgur/mandible/config" "github.com/Imgur/mandible/imageprocessor" "github.com/Imgur/mandible/imagestore" "github.com/Imgur/mandible/uploadedfile" ) type Server struct { Config *config.Configuration HTTPClient *http.Client ImageStore imagestore.ImageStore hashGenerator *imagestore.HashGenerator processorStrategy imageprocessor.ImageProcessorStrategy authenticator Authenticator stats RuntimeStats } type ServerResponse struct { Error string `json:"error,omitempty"` Data interface{} `json:"data,omitempty"` Status int `json:"status"` Success *bool `json:"success"` // the empty value is the nil pointer, because this is a computed property } func (resp *ServerResponse) Write(w http.ResponseWriter, s RuntimeStats) { respBytes, _ := resp.json() if resp.Status >= http.StatusBadRequest { log.Println(fmt.Sprintf("HTTP error: %d -- %s", resp.Status, resp.Error)) s.Error(resp.Status) } w.WriteHeader(resp.Status) w.Header().Set("Content-Type", "application/json") w.Write(respBytes) } // The success property is a computed property on the response 
status // This can't implement the MarshalJSON() interface sadly because it would be recursive func (resp *ServerResponse) json() ([]byte, error) { var success bool success = (resp.Status == http.StatusOK) resp.Success = &success bytes, err := json.Marshal(resp) resp.Success = nil return bytes, err } type ImageResponse struct { Link string `json:"link"` Mime string `json:"mime"` Name string `json:"name"` Hash string `json:"hash"` Size int64 `json:"size"` Width int `json:"width"` Height int `json:"height"` OCRText string `json:"ocrtext"` Thumbs map[string]interface{} `json:"thumbs"` UserID string `json:"user_id"` } type OcrResponse struct { Hash string `json:"hash"` OCRText string `json:"ocrtext"` } type UserError struct { UserFacingMessage error LogMessage error } func NewServer(c *config.Configuration, strategy imageprocessor.ImageProcessorStrategy, stats RuntimeStats) *Server { factory := imagestore.NewFactory(c) httpclient := &http.Client{} stores := factory.NewImageStores() hashGenerator := factory.NewHashGenerator(stores) authenticator := &PassthroughAuthenticator{} return &Server{c, httpclient, stores, hashGenerator, strategy, authenticator, stats} } func NewAuthenticatedServer(c *config.Configuration, strategy imageprocessor.ImageProcessorStrategy, auth Authenticator, stats RuntimeStats) *Server { factory := imagestore.NewFactory(c) httpclient := &http.Client{} stores := factory.NewImageStores() hashGenerator := factory.NewHashGenerator(stores) return &Server{c, httpclient, stores, hashGenerator, strategy, auth, stats} } func (s *Server) uploadFile(uploadFile io.Reader, fileName string, thumbs []*uploadedfile.ThumbFile, user *AuthenticatedUser) ServerResponse { tmpFile, err := saveToTmp(uploadFile) if err != nil { return ServerResponse{ Error: "Error saving to disk!", Status: http.StatusInternalServerError, } } upload, err := uploadedfile.NewUploadedFile(fileName, tmpFile, thumbs) defer upload.Clean() if err != nil { return ServerResponse{ Error: "Error detecting mime type!", Status: http.StatusInternalServerError, } } processor, err := s.processorStrategy(s.Config, upload) if err != nil { log.Printf("Error creating processor factory: %s", err.Error()) return ServerResponse{ Error: "Unable to process image!", Status: http.StatusInternalServerError, } } err = processor.Run(upload) if err != nil { log.Printf("Error processing %+v: %s", upload, err.Error()) return ServerResponse{ Error: "Unable to process image!", Status: http.StatusInternalServerError, } } upload.SetHash(s.hashGenerator.Get()) factory := imagestore.NewFactory(s.Config) obj := factory.NewStoreObject(upload.GetHash(), upload.GetMime(), "original") uploadFilepath := upload.GetPath() obj, err = s.ImageStore.Save(uploadFilepath, obj) if err != nil { log.Printf("Error saving processed output to store: %s", err.Error()) return ServerResponse{ Error: "Unable to save image!", Status: http.StatusInternalServerError, } } thumbsResp, err := s.buildThumbResponse(upload) if err != nil { log.Printf("Error processing %+v: %s", upload, err.Error()) return ServerResponse{ Error: "Unable to process thumbnail!", Status: http.StatusInternalServerError, } } size, err := upload.FileSize() if err != nil { return ServerResponse{ Error: "Unable to fetch image metadata!", Status: http.StatusInternalServerError, } } width, height, err := upload.Dimensions() if err != nil { return ServerResponse{ Error: "Error fetching upload dimensions: " + err.Error(), Status: http.StatusInternalServerError, } } var userID string if user != nil { userID = 
string(user.UserID) } resp := ImageResponse{ Link: obj.Url, Mime: obj.MimeType, Hash: upload.GetHash(), Name: fileName, Size: size, Width: width, Height: height, OCRText: upload.GetOCRText(), Thumbs: thumbsResp, UserID: userID, } return ServerResponse{ Data: resp, Status: http.StatusOK, } } type fileExtractor func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError) func (s *Server) Configure(muxer *http.ServeMux) { var extractorFile fileExtractor = func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError) { uploadFile, header, err := r.FormFile("image") if err != nil { return nil, "", &UserError{LogMessage: err, UserFacingMessage: errors.New("Error processing file")} } s.stats.Upload("file") return uploadFile, header.Filename, nil } var extractorUrl fileExtractor = func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError) { url := r.FormValue("image") uploadFile, err := s.download(url) if err != nil { return nil, "", &UserError{LogMessage: err, UserFacingMessage: errors.New("Error downloading URL!")} } s.stats.Upload("url") return uploadFile, path.Base(url), nil } var extractorBase64 fileExtractor = func(r *http.Request) (uploadFile io.Reader, filename string, uerr *UserError) { input := r.FormValue("image") b64data := input[strings.IndexByte(input, ',')+1:] uploadFile = base64.NewDecoder(base64.StdEncoding, strings.NewReader(b64data)) s.stats.Upload("base64") return uploadFile, "", nil } type uploadEndpoint func(fileExtractor, *AuthenticatedUser) http.HandlerFunc var uploadHandler uploadEndpoint = func(extractor fileExtractor, user *AuthenticatedUser) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { uploadFile, filename, uerr := extractor(r) if uerr != nil { log.Printf("Error extracting files: %s", uerr.LogMessage.Error()) resp := ServerResponse{ Status: http.StatusBadRequest, Error: uerr.UserFacingMessage.Error(), } resp.Write(w, s.stats) return } thumbs, err := parseThumbs(r) if err != nil { resp := ServerResponse{ Status: http.StatusBadRequest, Error: "Error parsing thumbnails!", } resp.Write(w, s.stats) return } resp := s.uploadFile(uploadFile, filename, thumbs, user) switch uploadFile.(type) { case io.ReadCloser: defer uploadFile.(io.ReadCloser).Close() break default: break } resp.Write(w, s.stats) } } // Wrap an existing upload endpoint with authentication, returning a new endpoint that 4xxs unless authentication is passed. 
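//
// The expected client protocol (mirroring HMACAuthenticator and its tests): send the
// AuthenticatedUser JSON in the Authorization header, and a base64-encoded HMAC-SHA256 of that
// exact JSON, keyed with the shared secret, in X-Authorization-HMAC. A rough client-side sketch,
// assuming an illustrative host and a shared key of "foobar":
//
//	grant := AuthenticatedUser{UserID: "123", GrantTime: time.Now(), GrantDurationSeconds: 3600}
//	body, _ := json.Marshal(&grant)
//	mac := hmac.New(sha256.New, []byte("foobar"))
//	mac.Write(body)
//	req, _ := http.NewRequest("POST", "http://localhost:8080/user/123/file", nil)
//	req.Header.Set("Authorization", string(body))
//	req.Header.Set("X-Authorization-HMAC", base64.StdEncoding.EncodeToString(mac.Sum(nil)))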
authenticatedEndpoint := func(endpoint uploadEndpoint, extractor fileExtractor) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { requestVars := mux.Vars(r) attemptedUserIdString, ok := requestVars["user_id"] // They didn't send a user ID to a /user endpoint if !ok || attemptedUserIdString == "" { w.WriteHeader(http.StatusBadRequest) return } user, err := s.authenticator.GetUser(r) // Their HMAC was invalid or they are trying to upload to someone else's account if user == nil || err != nil || user.UserID != attemptedUserIdString { w.WriteHeader(http.StatusUnauthorized) log.Printf("Authentication error: %s", err.Error()) return } handler := endpoint(extractor, user) handler(w, r) } } ocrHandler := func(w http.ResponseWriter, r *http.Request) { imageID := r.FormValue("uid") if imageID == "" { resp := ServerResponse{ Status: http.StatusBadRequest, Error: "Image ID must be passed as \"uid\"", } resp.Write(w, s.stats) return } factory := imagestore.NewFactory(s.Config) tObj := factory.NewStoreObject(imageID, "", "original") storeReader, err := s.ImageStore.Get(tObj) if err != nil { resp := ServerResponse{ Status: http.StatusBadRequest, Error: fmt.Sprintf("Error retrieving image with ID: %s", imageID), } resp.Write(w, s.stats) return } defer storeReader.Close() storeFile, err := saveToTmp(storeReader) if err != nil { resp := ServerResponse{ Status: http.StatusBadRequest, Error: fmt.Sprintf("Error saving original image to tmpfile: %s", imageID), } resp.Write(w, s.stats) return } defer os.Remove(storeFile) upload, err := uploadedfile.NewUploadedFile("", storeFile, nil) if err != nil { resp := ServerResponse{ Error: fmt.Sprintf("Unable to generate UploadedFile object: %s", imageID), Status: http.StatusInternalServerError, } resp.Write(w, s.stats) return } upload.SetHash(imageID) defer upload.Clean() //TODO: fix this sp error: processor := imageprocessor.DuelOCRStratagy() err = processor.Process(upload) if err != nil { log.Printf("Error runinng DuelOCRStrategy on %+v: %s", upload, err.Error()) resp := ServerResponse{ Error: "Unable to execute OCR strategy", Status: http.StatusInternalServerError, } resp.Write(w, s.stats) return } ocrResp := OcrResponse{ Hash: upload.GetHash(), OCRText: upload.GetOCRText(), } resp := ServerResponse{ Data: ocrResp, Status: http.StatusOK, } resp.Write(w, s.stats) } thumbnailHandler := func(w http.ResponseWriter, r *http.Request) { imageID := r.FormValue("uid") factory := imagestore.NewFactory(s.Config) tObj := factory.NewStoreObject(imageID, "", "original") thumbs, err := parseThumbs(r) if err != nil { resp := ServerResponse{ Status: http.StatusBadRequest, Error: "Error parsing thumbnails!", } resp.Write(w, s.stats) return } if len(thumbs) != 1 { resp := ServerResponse{ Status: http.StatusBadRequest, Error: "Wrong number of thumbnails, expected 1", } resp.Write(w, s.stats) return } storeReader, err := s.ImageStore.Get(tObj) if err != nil { resp := ServerResponse{ Status: http.StatusNotFound, Error: fmt.Sprintf("Error retrieving image with ID: %s", imageID), } resp.Write(w, s.stats) return } defer storeReader.Close() storeFile, err := saveToTmp(storeReader) if err != nil { resp := ServerResponse{ Status: http.StatusInternalServerError, Error: "Error saving original Image!", } resp.Write(w, s.stats) return } defer os.Remove(storeFile) upload, err := uploadedfile.NewUploadedFile("", storeFile, thumbs) if err != nil { log.Printf("Error processing %+v: %s", storeFile, err.Error()) resp := ServerResponse{ Error: "Unable to process thumbnail!", Status: 
http.StatusInternalServerError, } resp.Write(w, s.stats) return } upload.SetHash(imageID) defer upload.Clean() processor, _ := imageprocessor.ThumbnailStrategy(s.Config, upload) err = processor.Run(upload) if err != nil { log.Printf("Error processing %+v: %s", upload, err.Error()) resp := ServerResponse{ Error: "Unable to process thumbnail!", Status: http.StatusInternalServerError, } resp.Write(w, s.stats) return } ts := upload.GetThumbs() t := ts[0] if !t.GetNoStore() { thumbName := fmt.Sprintf("%s/%s", upload.GetHash(), t.Name) tObj = factory.NewStoreObject(thumbName, upload.GetMime(), "thumbnail") err = tObj.Store(t, s.ImageStore) if err != nil { log.Printf("Error storing %+v: %s", t, err.Error()) resp := ServerResponse{ Error: "Unable to store thumbnail!", Status: http.StatusInternalServerError, } resp.Write(w, s.stats) return } } s.stats.Thumbnail(t.Name) http.ServeFile(w, r, t.GetPath()) } rootHandler := func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "An open source image uploader by Imgur") fmt.Fprint(w, "Congratulations! Your image upload server is up and running. Head over to the github page for documentation") fmt.Fprint(w, "
") fmt.Fprint(w, "") } requestMiddleware := func(handler http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { s.stats.Request(r.URL.Path) if os.Getenv("MANDIBLE_DEBUG") == "true" { r.ParseForm() log.Printf("Request url: %s with get params: %v and Headers: %v", r.URL.Path, r.Form, r.Header) } start := time.Now() handler(w, r) elapsed := time.Since(start) s.stats.ResponseTime(elapsed, r.URL.Path) } } router := mux.NewRouter() router.HandleFunc("/file", requestMiddleware(uploadHandler(extractorFile, nil))) router.HandleFunc("/url", requestMiddleware(uploadHandler(extractorUrl, nil))) router.HandleFunc("/base64", requestMiddleware(uploadHandler(extractorBase64, nil))) router.HandleFunc("/user/{user_id}/file", requestMiddleware(authenticatedEndpoint(uploadHandler, extractorBase64))) router.HandleFunc("/user/{user_id}/url", requestMiddleware(authenticatedEndpoint(uploadHandler, extractorUrl))) router.HandleFunc("/user/{user_id}/base64", requestMiddleware(authenticatedEndpoint(uploadHandler, extractorBase64))) router.HandleFunc("/thumbnail", requestMiddleware(thumbnailHandler)) router.HandleFunc("/ocr", requestMiddleware(ocrHandler)) router.HandleFunc("/", requestMiddleware(rootHandler)) muxer.Handle("/", router) } func (s *Server) buildThumbResponse(upload *uploadedfile.UploadedFile) (map[string]interface{}, error) { factory := imagestore.NewFactory(s.Config) thumbsResp := map[string]interface{}{} for _, t := range upload.GetThumbs() { thumbName := fmt.Sprintf("%s/%s", upload.GetHash(), t.Name) tObj := factory.NewStoreObject(thumbName, upload.GetMime(), "thumbnail") err := tObj.Store(t, s.ImageStore) if err != nil { return nil, err } s.stats.Thumbnail(t.Name) thumbsResp[t.Name] = tObj.Url } return thumbsResp, nil } func (s *Server) download(url string) (io.ReadCloser, error) { req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } req.Header.Add("User-Agent", s.Config.UserAgent) resp, err := s.HTTPClient.Do(req) if err != nil { // "HTTP protocol error" - maybe the server sent an invalid response or timed out return nil, err } if 200 != resp.StatusCode { return nil, errors.New("Non-200 status code received") } contentLength := resp.ContentLength if contentLength == 0 { return nil, errors.New("Empty file received") } return resp.Body, nil } func parseThumbs(r *http.Request) ([]*uploadedfile.ThumbFile, error) { thumbString := r.FormValue("thumbs") if thumbString == "" { return []*uploadedfile.ThumbFile{}, nil } type ThumbRequest struct { Width int `json:"width"` MaxWidth int `json:"max_width"` Height int `json:"height"` MaxHeight int `json:"max_height"` Shape string `json:"shape"` CropGravity string `json:"crop_gravity"` CropHeight int `json:"crop_height"` CropWidth int `json:"crop_width"` Quality int `json:"quality"` CropRatio string `json:"crop_ratio"` DesiredFormat string `json:"format"` NoStore bool `json:"nostore"` } var thumbRequests map[string]ThumbRequest err := json.Unmarshal([]byte(thumbString), &thumbRequests) if err != nil { fmt.Println(err.Error()) return nil, errors.New("Error parsing thumbnail JSON!") } var thumbs []*uploadedfile.ThumbFile for name, thumbRequest := range thumbRequests { thumb := uploadedfile.NewThumbFile( thumbRequest.Width, thumbRequest.MaxWidth, thumbRequest.Height, thumbRequest.MaxHeight, name, thumbRequest.Shape, "", // shape thumbRequest.CropGravity, thumbRequest.CropWidth, thumbRequest.CropHeight, thumbRequest.CropRatio, thumbRequest.Quality, thumbRequest.DesiredFormat, thumbRequest.NoStore, ) thumbs 
= append(thumbs, thumb) } return thumbs, nil } func saveToTmp(upload io.Reader) (string, error) { tmpFile, err := ioutil.TempFile(os.TempDir(), "image") if err != nil { fmt.Println(err) return "", err } defer tmpFile.Close() _, err = io.Copy(tmpFile, upload) if err != nil { fmt.Println(err) return "", err } return tmpFile.Name(), nil } ================================================ FILE: server/server_test.go ================================================ package server import ( "bytes" "crypto/hmac" "crypto/sha256" "encoding/base64" "encoding/json" "io/ioutil" "net/http" "net/http/httptest" "net/url" "strings" "testing" "time" "github.com/Imgur/mandible/config" "github.com/Imgur/mandible/imageprocessor" "github.com/Imgur/mandible/imagestore" ) func TestRequestingTheFrontPageGetsSomeHTML(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) stats := &DiscardStats{} server := NewServer(cfg, imageprocessor.PassthroughStrategy, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() res, err := http.Get(ts.URL) if err != nil { t.Fatalf("Error when retrieving %s: %s", ts.URL, err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Failed to read response body of %s: %s", ts.URL, err.Error()) } t.Logf("Response to %s/ was: %s", ts.URL, body) if res.StatusCode != 200 { t.Fatalf("Unexpected status code %d", res.StatusCode) } sbody := string(body) if !strings.Contains(sbody, "") { t.Fatalf("Did I get HTML back? Didn't find ...") } } func TestPostingBase64FilePutsTheFileInStorageAndReturnsJSON(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) stats := &DiscardStats{} server := NewServer(cfg, imageprocessor.PassthroughStrategy, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() // a 1x1 base64 encoded transparent GIF b64bytes, _ := base64.StdEncoding.DecodeString(b64gif) values := make(url.Values) values.Add("image", b64gif) res, err := http.PostForm(ts.URL+"/base64", values) if err != nil { t.Fatalf("Error when uploading base64 GIF: %s", err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Failed to read response body: %s", err.Error()) } t.Logf("Response to /base64 was: %s", body) if res.StatusCode != 200 { t.Fatalf("Unexpected status code %d", res.StatusCode) } var serverResp ServerResponse var imageResp ImageResponse err = json.Unmarshal(body, &serverResp) if err != nil { t.Fatalf("Unexpected error parsing response: %s", err.Error()) } if !*serverResp.Success { t.Fatalf("Uploading GIF was unsuccessful") } imageRespBytes, _ := json.Marshal(serverResp.Data) err = json.Unmarshal(imageRespBytes, &imageResp) if imageResp.Height != 1 { t.Fatalf("Expected height to be 1, instead %d", imageResp.Height) } if imageResp.Width != 1 { t.Fatalf("Expected width to be 1, instead %d", imageResp.Width) } if imageResp.Size != 42 { t.Fatalf("Expected size to be 42, instead %d", imageResp.Size) } if imageResp.Mime != "image/gif" { t.Fatalf("Expected image MIME type to be image/gif, instead %s", imageResp.Mime) } immStore := server.ImageStore exists, 
err := immStore.Exists(&imagestore.StoreObject{Id: imageResp.Hash}) if err != nil { t.Fatalf("Unexpected error checking if %s exists in in-memory image store: %s", imageResp.Hash, err.Error()) } if !exists { t.Fatalf("Expected to find %s in the in-memory storage, instead absent. Dump: %+v", imageResp.Hash, immStore) } storedBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: imageResp.Hash}) if err != nil { t.Fatalf("Unexpected error fetching %s from in-memory image store: %s", imageResp.Hash, err.Error()) } storedBodyBytes, _ := ioutil.ReadAll(storedBodyReader) if !bytes.Equal(storedBodyBytes, []byte(b64bytes)) { t.Fatalf("Stored bytes %s != %s", storedBodyBytes, []byte(b64bytes)) } } func TestAuthentication(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) authenticator := NewHMACAuthenticatorSHA256([]byte("foobar")) stats := &DiscardStats{} server := NewAuthenticatedServer(cfg, imageprocessor.PassthroughStrategy, authenticator, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() values := make(url.Values) values.Add("image", b64gif) req, err := http.NewRequest("POST", ts.URL+"/user/123/base64", strings.NewReader(values.Encode())) if err != nil { t.Fatalf("Error when forming authenticated base64 GIF upload request: %s", err.Error()) } message := AuthenticatedUser{ UserID: "123", GrantTime: time.Now(), GrantDurationSeconds: 365 * 24 * 3600, } messageBytes, _ := json.Marshal(&message) messageMacWriter := hmac.New(sha256.New, []byte("foobar")) messageMacWriter.Write(messageBytes) messageMac := base64.StdEncoding.EncodeToString(messageMacWriter.Sum(nil)) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") req.Header.Set("Authorization", string(messageBytes)) req.Header.Set("X-Authorization-HMAC", string(messageMac)) httpclient := http.Client{} res, err := httpclient.Do(req) if err != nil { t.Fatalf("Error when uploading authenticated base64 GIF: %s", err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Failed to read response body: %s", err.Error()) } t.Logf("Response to /base64 was: %s", body) if res.StatusCode != 200 { t.Fatalf("Unexpected status code %d", res.StatusCode) } var serverResp ServerResponse var imageResp ImageResponse err = json.Unmarshal(body, &serverResp) if err != nil { t.Fatalf("Unexpected error parsing response: %s", err.Error()) } if !*serverResp.Success { t.Fatalf("Uploading GIF was unsuccessful") } imageRespBytes, _ := json.Marshal(serverResp.Data) err = json.Unmarshal(imageRespBytes, &imageResp) if imageResp.Mime != "image/gif" { t.Fatalf("Expected image MIME type to be image/gif, instead %s", imageResp.Mime) } if imageResp.UserID != "123" { t.Fatalf("Expected user ID to be \"123\", instead \"%s\"", imageResp.UserID) } } func TestGetFullWebpThumb(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) stats := &DiscardStats{} server := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() thumbsJson, _ := json.Marshal(map[string]interface{}{ "webp": 
map[string]interface{}{ "format": "webp", }, }) values := make(url.Values) values.Add("image", b64dan) values.Add("thumbs", string(thumbsJson)) res, err := http.PostForm(ts.URL+"/base64", values) if err != nil { t.Fatalf("Error when uploading base64 image: %s", err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Failed to read response body: %s", err.Error()) } t.Logf("Response to /base64 was: %s", body) if res.StatusCode != 200 { t.Fatalf("Unexpected status code %d", res.StatusCode) } var serverResp ServerResponse var imageResp ImageResponse err = json.Unmarshal(body, &serverResp) if err != nil { t.Fatalf("Unexpected error parsing response: %s", err.Error()) } if !*serverResp.Success { t.Fatalf("Uploading image was unsuccessful") } imageRespBytes, _ := json.Marshal(serverResp.Data) err = json.Unmarshal(imageRespBytes, &imageResp) if len(imageResp.Thumbs) == 0 { t.Fatalf("Expected thumbs to contain data, instead blank") } if _, ok := imageResp.Thumbs["webp"]; !ok { t.Fatalf("Expected webp thumb, not given") } immStore := server.ImageStore storeId := imageResp.Hash + "/webp" exists, err := immStore.Exists(&imagestore.StoreObject{Id: storeId}) if err != nil { t.Fatalf("Unexpected error checking if %s exists in in-memory image store: %s", storeId, err.Error()) } if !exists { t.Fatalf("Expected to find %s in the in-memory storage, instead absent. Dump: %+v", storeId, immStore) } storedBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: storeId}) if err != nil { t.Fatalf("Unexpected error fetching %s from in-memory image store: %s", storeId, err.Error()) } storedBodyBytes, _ := ioutil.ReadAll(storedBodyReader) if len(storedBodyBytes) == 0 { t.Fatalf("Expected webp thumbnail to be larger than 0 bytes") } if int64(len(storedBodyBytes)) >= imageResp.Size { t.Fatalf("Expected thumbnail to be smaller than original image, %v vs %v", int64(len(storedBodyBytes)), imageResp.Size) } } func TestGetSizedWebpThumb(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) stats := &DiscardStats{} server := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() thumbsJson, _ := json.Marshal(map[string]interface{}{ "webp": map[string]interface{}{ "format": "webp", }, "webpthumb": map[string]interface{}{ "format": "webp", "shape": "custom", "width": 10, "height": 10, }, }) values := make(url.Values) values.Add("image", b64dan) values.Add("thumbs", string(thumbsJson)) res, err := http.PostForm(ts.URL+"/base64", values) if err != nil { t.Fatalf("Error when uploading base64 iamge: %s", err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Failed to read response body: %s", err.Error()) } t.Logf("Response to /base64 was: %s", body) if res.StatusCode != 200 { t.Fatalf("Unexpected status code %d", res.StatusCode) } var serverResp ServerResponse var imageResp ImageResponse err = json.Unmarshal(body, &serverResp) if err != nil { t.Fatalf("Unexpected error parsing response: %s", err.Error()) } if !*serverResp.Success { t.Fatalf("Uploading image was unsuccessful") } imageRespBytes, _ := json.Marshal(serverResp.Data) err = json.Unmarshal(imageRespBytes, &imageResp) if len(imageResp.Thumbs) == 0 { t.Fatalf("Expected thumbs to contain data, instead blank") } if _, ok 
:= imageResp.Thumbs["webp"]; !ok { t.Fatalf("Expected webp thumb, not given") } immStore := server.ImageStore storeId := imageResp.Hash + "/webp" storeIdSmall := imageResp.Hash + "/webpthumb" exists, err := immStore.Exists(&imagestore.StoreObject{Id: storeIdSmall}) if err != nil { t.Fatalf("Unexpected error checking if %s exists in in-memory image store: %s", storeIdSmall, err.Error()) } if !exists { t.Fatalf("Expected to find %s in the in-memory storage, instead absent. Dump: %+v", storeIdSmall, immStore) } storedBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: storeId}) if err != nil { t.Fatalf("Unexpected error fetching %s from in-memory image store: %s", storeId, err.Error()) } storedBodyReaderSmall, err := immStore.Get(&imagestore.StoreObject{Id: storeIdSmall}) if err != nil { t.Fatalf("Unexpected error fetching %s from in-memory image store: %s", storeIdSmall, err.Error()) } storedBodyBytes, _ := ioutil.ReadAll(storedBodyReader) storedBodyBytesSmall, _ := ioutil.ReadAll(storedBodyReaderSmall) if len(storedBodyBytesSmall) == 0 { t.Fatalf("Expected webp thumbnail to be larger than 0 bytes") } if len(storedBodyBytesSmall) >= len(storedBodyBytes) { t.Fatalf("Expected thumbnail to be smaller than original image, %v vs %v", len(storedBodyBytesSmall), len(storedBodyBytes)) } } func TestTooLarge(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) stats := &DiscardStats{} server := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() thumbsJson, _ := json.Marshal(map[string]interface{}{ "webp": map[string]interface{}{ "format": "webp", "shape": "custom", "width": 20000, "height": 20000, }, }) values := make(url.Values) values.Add("image", b64dan) values.Add("thumbs", string(thumbsJson)) res, err := http.PostForm(ts.URL+"/base64", values) if err != nil { t.Fatalf("Error when uploading base64 iamge: %s", err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Failed to read response body: %s", err.Error()) } t.Logf("Response to /base64 was: %s", body) if res.StatusCode != 500 { t.Fatalf("Unexpected status code %d", res.StatusCode) } var serverResp ServerResponse err = json.Unmarshal(body, &serverResp) if err != nil { t.Fatalf("Unexpected error parsing response: %s", err.Error()) } if *serverResp.Success { t.Fatalf("Uploading large image was successful") } } func TestTooSmall(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) stats := &DiscardStats{} server := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() thumbsJson, _ := json.Marshal(map[string]interface{}{ "webp": map[string]interface{}{ "format": "webp", "shape": "custom", "width": 0, "height": 0, }, }) values := make(url.Values) values.Add("image", b64dan) values.Add("thumbs", string(thumbsJson)) res, err := http.PostForm(ts.URL+"/base64", values) if err != nil { t.Fatalf("Error when uploading base64 image: %s", err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { 
t.Fatalf("Failed to read response body: %s", err.Error()) } t.Logf("Response to /base64 was: %s", body) if res.StatusCode != 500 { t.Fatalf("Unexpected status code %d", res.StatusCode) } var serverResp ServerResponse err = json.Unmarshal(body, &serverResp) if err != nil { t.Fatalf("Unexpected error parsing response: %s", err.Error()) } if *serverResp.Success { t.Fatalf("Uploading small image was successful") } } func TestGetTallThumb(t *testing.T) { cfg := &config.Configuration{ MaxFileSize: 99999999999, HashLength: 7, UserAgent: "Foobar", Stores: make([]map[string]string, 0), Port: 8888, } memcfg := make(map[string]string) memcfg["Type"] = "memory" cfg.Stores = append(cfg.Stores, memcfg) stats := &DiscardStats{} server := NewServer(cfg, imageprocessor.ThumbnailStrategy, stats) muxer := http.NewServeMux() server.Configure(muxer) ts := httptest.NewServer(muxer) defer ts.Close() thumbsJson, _ := json.Marshal(map[string]interface{}{ "tallthumb": map[string]interface{}{ "shape": "custom", "crop_gravity": "north", "crop_ratio": "1:2.25", "max_width": 10, }, }) values := make(url.Values) values.Add("image", b64dan) values.Add("thumbs", string(thumbsJson)) res, err := http.PostForm(ts.URL+"/base64", values) if err != nil { t.Fatalf("Error when uploading base64 iamge: %s", err.Error()) } body, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Failed to read response body: %s", err.Error()) } t.Logf("Response to /base64 was: %s", body) if res.StatusCode != 200 { t.Fatalf("Unexpected status code %d", res.StatusCode) } var serverResp ServerResponse var imageResp ImageResponse err = json.Unmarshal(body, &serverResp) if err != nil { t.Fatalf("Unexpected error parsing response: %s", err.Error()) } if !*serverResp.Success { t.Fatalf("Uploading image was unsuccessful") } imageRespBytes, _ := json.Marshal(serverResp.Data) err = json.Unmarshal(imageRespBytes, &imageResp) if len(imageResp.Thumbs) == 0 { t.Fatalf("Expected thumbs to contain data, instead blank") } if _, ok := imageResp.Thumbs["tallthumb"]; !ok { t.Fatalf("Expected cropped thumb, not given") } immStore := server.ImageStore storeId := imageResp.Hash storeIdSmall := imageResp.Hash + "/tallthumb" exists, err := immStore.Exists(&imagestore.StoreObject{Id: storeIdSmall}) if err != nil { t.Fatalf("Unexpected error checking if %s exists in in-memory image store: %s", storeIdSmall, err.Error()) } if !exists { t.Fatalf("Expected to find %s in the in-memory storage, instead absent. 
Dump: %+v", storeIdSmall, immStore) } storedBodyReader, err := immStore.Get(&imagestore.StoreObject{Id: storeId}) if err != nil { t.Fatalf("Unexpected error fetching %s from in-memory image store: %s", storeId, err.Error()) } storedBodyReaderSmall, err := immStore.Get(&imagestore.StoreObject{Id: storeIdSmall}) if err != nil { t.Fatalf("Unexpected error fetching %s from in-memory image store: %s", storeIdSmall, err.Error()) } storedBodyBytes, _ := ioutil.ReadAll(storedBodyReader) storedBodyBytesSmall, _ := ioutil.ReadAll(storedBodyReaderSmall) if len(storedBodyBytesSmall) == 0 { t.Fatalf("Expected webp thumbnail to be larger than 0 bytes") } if len(storedBodyBytesSmall) >= len(storedBodyBytes) { t.Fatalf("Expected thumbnail to be smaller than original image, %v vs %v", len(storedBodyBytesSmall), len(storedBodyBytes)) } } var ( b64gif = "R0lGODlhAQABAIAAAAAAAP" + "/" + "/" + "/yH5BAEAAAAALAAAAAABAAEAAAIBRAA7" b64dan = "iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAIAAACRXR/mAAAWAUlEQVRYw7V5eZBdV3nnd87dl/fu23tfXneru9Wtllp7y8Y2MpJtIMYTiBkYMFAZCFtqKiQMJjOkUsNUYJIKqRomhRNCGDAwsROQDHiRN2HtUktqSd3qVu/L63799v3d/d5z5g9M4mQIIn/Mr84ft26duvWr7/6+3/d956ADCbXumI6HHAwxzPgezjkucAAuSBgrLHIJbfhIlmTHtVxZ5m2L1vVEQnN1iwNbFGG9BG1BzrM9cGmZsm97+D0PffCJncN9mXTlmW/+T7c6wyP+8U89OXbgYLWqZzcKQ2GobLyUWr2Smr1dmzdOrkJ/r7RH9nMlYhRB0mDwoMBu5I0CkC4Gt/lkDfx9HeEhx8o16QUHAODxLjZS9TcNKji2KsN8wWyLQNdupdEw1SC0JyNr9fZQf2h3f9ys6cRzM47/xT/+w/GJAwAAAD2wdOr7G7Fd9/zWRz7y8zdwD9y+NvXs0yupSt2xpG4O1CCe2BOP+3nkGhtZulKC3FkPCQAMQBJAAagDjPHQF4JUHk4CWABDAKMAFACxQDzo1SB5fBfpHipnq7prarFIIBD1AvFgS7vTqNSzq+n0qsMnDk3cGwlzyWhcdZscD7NZ9Oh/+B0AeOW11774X5+8MTkFrMhFE67VCGh8F891dqgV30rNpVglxEVakd9EHbzcyZBNijlZe3skrCbEYCTou2wJy54UCoXY4bYWnuGoLAa1cHVz7vat01hCpbU1IYBFAeyMGeofNoPxeqMc0VTbIOupYjq9Fg2HRoYH3nH8Hb/5H5+8fGVq8vz5l09ffPHFF+FXIxgIaFpQo+jcqec4CRtYSqgBUWAdhuOAQwIVMQ7wgmW6hXrFcizXrqfXZ//+r34wf/H2vl2gRvHaGnGakOyBloGWConrdnmgs7Vvx0EDeOrwum5PvvoPRcx/8MOfeOmF888+/wr8W8BWrVw52/Sb1uX8FmWobdBKveJ7nqnbmWqD4aQWlW9VeFmprk5fmztfCwCEYyg2mIR4LLu5LcR1XlL29fTNr8nXbywin91/5OjSypZu1ro6drz8o0vb6392/3jfeK+6st5s/JqcWJb9vc/9yWah4NkucT1AKK5E69RByJFZrq67QVW6bywZijaLc9nMSv3h4+HMagUwyyC5a7CTC8vVdG7rVioYLFK3/vLVxrmpa3+a6A9Ijf/1F8+7oXiyNXQ1XZKFzYO7Yn0KXW6o06ncr8OMYao11vFkQjQACSDGCmGOa3i2iKGXwKG4lGypBGTXs3yGcXqGI1pbBAtSJN5HefnqhctzF1NLW87klU270HSBSRvuG9duW6u5etFbqDXzTZMCbFWs8b6OUI9/qC+6smHXbPtXc8IYM+0sGyBEABjBOIDQtGMqthHzfNd2w8Rrj1mBKAlH1Y07teK2nxyNhxN929laoVgqZtdIOe+UQAKQGcjVvYbjcQA1F3Ilmye0DEAAAEBgICDYbKh1ICZgYt3e1O9Ki816XgwgjhHBKOVTAUAAaAfYI2G2g5nMunjTTChbmRK0akg3vOzmAkeF7r62AM8FhsaU3wpIctBxvUyxsbGQqtQKOm7OXK+U6qACEAAXQKNQqdQHhdaiZQeDXkcikM7fRWZsLKTRaq1OYdXzEUAPQAeDNBnCIrOccZs+H6BQKLmqSrV2UNTWh4/e19bWHolpWjAKiAVVBlUEwwSLgmlvbiwuL90+r5wplO1s0ZxZKhUphGRoT3YDW7s+v8SaVJPVNNyFFvrvv/8H/+MvvsYCSABhgCEBNUW8YvmjAfAZJmuyySA5cF9PKBGPReOHJw51j+wBOQDIB5YDH0PTAUSBw0B4MDwwDDe3nc5texjrHj19/szZy1fEWBQLXDqzvr3myCxeb+Ca5f3qTGQ+9sQnGeKsLC/1A/SyKCzD2QbZ9uDe+ztKDXMh64wPsAce7Bkd2L97566OjnbgRfApeDYl1Pd85ANQjmIBYZG4PvI8RpRDoYgmy4FgYGjXrlhLtFYqbS4vrs05rIM8h6Ztcldt4R+/fn7i+LvG+rqbAADwsyYtEohFg70j/Rww4zK0tDEUeIFHImAgDLFdIEB9FvkMSxkAQAgwUJ/YwIPPeMQ1wPbBcp1q1a/ooz1jw8O7tIDWGcetEdrVGUGYubtB1E2fATR+YP/Jy5fuEKhSAICBHa0T/ZH1S6uEUE3zdu8Z2dk/Gg5HAFEQeRAVQCyiCAgABd/zgTIMiMhnkeMgy0KW6zu27/u1aq3eqKvxUL1p5CpbSlg5eGCwYaFMsX6XTGzt7jO4sNK5+9O/+0c3V2dyG2vLs7dc26ivlfIV4mGQAmyAESJyCHiGUh8BAkQopr7lYsogj0WOB8igPEtZFrM8YI74LkEMweBjQomjuHxPx8D06jaDLYE6CYnc3ei//tdPeTwLlh8Vgw7TzG9vPvOtb65Nff/69HwVoF+CiByIaZ3ACmA4LkJYRSzlkEsRAt8njONgDyNOIoQS8BFlwWWJ71LfIb5DiccyrOuavGAO7+lcnp2fvjabrvwa9eepr35ho5DhbSIyaslt+qwwkoh0ydyNO80GAEIwtvPQYHIMPB9cn5VkjDhK
KEgClhTsM2AB8VkCPDYNxjURyxCFQxbn2k3XchiEAVOf4ZpG06OWEgnnUoV81bs7re8//fTPn/btPvy+J35zZOLYM1//+uqNCmtDfyT4zo/enxwZBs8AJQAiwhwDjoUoARcD9SHWC4FWDED9BhgqNXXADsIUOy7ne5zvc4RFxPZ9h5q0spFmFT6eDLP5Clh3YcYghGNAKUChWWcQRNq6NaM+1JiLuxbPK7/3t3/ZOfFuYCmmpJHOLy+s6RWTIWxuaev26+ezM7eM4pKMDY7nUSAASgBhhGwKCLOixFBkOMT2kNVwc8Xs1JWb2Y2iHMGZKlNtunepiQKDDEo5ANN1lheXJq9Mf2C87zOPPuz6mfmtVEB2enYOL3n84x/7428883pH745gYuDqzZWFjXy64ly9Pp1NrQ+0tahxBZwmNSiql0gh69sOViRWFBHDGq7XqJQKta0LF1ZqJdqawNcWTY/+qlARAqztEwD4x+JpV3OdIZl91/F+PdVRTw/GYvjSy0bOOHfxat/Yvg+959j01KTPu6HRZKaQQSWlYfPf+D8/vWd24eHPfgozHNQbhlVaW1xNl7KW54mSghhEVV/EIb2ByizcSUuWb91NWoT97Oe+pPKSTxESEctIgmP0D3WA0dxc26Ieam+Jy1gaC7m/f9/YkmH+5AffW5y68UalVI0mHjw0HrQLnGupQe3ET85+5+zSpz/0GG83N9cz+cz6rdVlD/k7h3clNA3zni1DgyXA8pZu8wDO/0NE5tnDY8n9XWxMtbs72llVE4DaEiOV641CIRv16uoD/ZAxSltmJJgI8xJIIXli5+fd0qnT17tHR0cP7e9YTb987dbRiXeUlBvlOzcfOjJuiotf/ObzJ55/1fHsw6Ndh3cPR3tHo4Egdt1SUV9ZmBYVyxFxT7v87B+9jzQbnIyuzpl/9t3J3h3tQcH/jaPjxw4llMEIMBY0V8EDBAAYQAAwAQCgFcHqc38jde0oVRt3rvxob3+f0jUIg+1Gc3P2J5c6kmMoIkhiq2kahfU0sYyx0eRT3/mbp3/66k9//Mrc6Rsf+MKTx963XwzHrAYNE7ZL5USB5Ot6qWzeXssFYjgZk3zbjEdV1xQuTW1/+9tf+7u//cbK6oLjmQ29qdvIMF2GBRYA4mrQsAzwPACIK5KkaSCZQkPv6UtShgDySd2SE4OD98vbc8t2gdioIIakYJBN7j9o68a5c1e+/Aefb9k/Ri7efPdI/8MPHs9W9af++sTQQGdhbWmwt3380EO+oK6ffO3UGxff+uN4gIOPfybWvtOhw65HeVHlFU6V5IAqswDgMmj/oSO3V1Yc29nVOwJCAIZ7zMKVejEb7e4E8LBDoWRobQktGKzenM+tbabXa1mwlu/c2sxkPv6pjx//d4/C2uxaPt10zI5Ix0hcaPtkhJfxjclXeAGvzC1mGm7Va4wd2LG+WtOrZUI8DqMgB8VS8dO/++jgyO5SvgCEEs8HzyHUZYMcX67VQuHQM8+enN/WtXoDqgR8UUbYwQQRDJZNbRsQCwQQFkN794YG9uzwGIdzyht39uwaTewZhfI2WIwqcpi6J5/9+wceOtaZTBBdf/s9j5ieszR/e/n6qrxD//JXDz75sdM3L3kA4BHKMQwD/lf/2xd+icuHMHYAXjj9ajafrTm0US6dDEr/6bF3huSmuCMKIADhiNFAHsKiSiUBBYMQEjBFottssSuM34D1BUpZ5EkVZD34zqNmoTlz9qIUUGReFJRg0dSNOg7JdmsyPbeUKxffNFIK4LOYgP9WH+U4FjM8L4hon6JYur78i6QVpICNpBYj/+UnDr3r/e+NYk3kBBA5pGhEkKmsMkoQmk0ztW4Uc2FZwAJQxkNaAkre6TM/K7jWIw+826zXbs3ebBiO5/g5vVi1nKWV21YyvVGEa6+96aT7x/fJUDvy4CNvf/f7zWZNZXmGUgZhiihBhI3ynOWxImA70j6b27Qd+7HH/n2Pmg/0EKteMFWecQjveZRhMYNBJ9BompmMYdVNu1reLPT2Jbkd/dC+A8SmzFzlmjbSQq0tLYrIluoNt2bnm4WN8srrb9SvTHKEYQAsAARAtjPbbSqbWl372cs/rVcrjmWahmmalmMbruewPMuSUBD52JNkFiHGd3584lsffehg575dlVI1KLW7iCLHQTWdIYyNDbNepz5Ed+3yfPu5P//qC8+f+PDn/0usZw9wTTPAeXUWAAPDUMwC4Lpj5xq6UfMV23aog3wW4M1oZXJZzo9PPfciwIu/rJ2nkNNtTRJtp8EDdAG3AG5bTBF5tlgzEgg88ASGCyqiA262USccam1JEMdgFem9v/2Jcy+8dOrET7SL19aW1y+8fu3Yex7hRR4wBpYjNp1fXCoh/ZNf+dLQg8dPfeCzlHq/0BUAAIu9Vk3J1vRfMmLsVaWZSq1umh4Q10MRIvtgPfmRhx1S3a42A1rMJS5hGTUY4hTZsu3FteWt9JamBTy9ks5vqx1dJcc7fea1wvbmYM/w0eMPd/Z02LWyrjeyuULJM8aPTnQO9/WOH1ad8sZqpqMlsjPZHlKkbLnOOSRnWL+0g0Af701c36hsU9cGMBEjU/quwzu+/7XPvXLmhcm5jaMPvFNTRODEeKK9pbPTNvQf/vhEZmWzXQtF4iEhGjYZQVKjlmcsLc3cN/a2gVgftpuOZ+X12ura+ts++qFAWC3Nzwf6h3h9izIYxdsBePCslZV8eX7x7PnL333u9Mzy+r+MVkJifFaNaEGPxUXLsoB+5XceHdw30ijU55YWDddQlCAlgDheVoJ1s9HZ0fnAvQ96q3kmaxpFo7qZpbpTadZdjrWw0tB14jc4TFMrGy3jw117j29dejE80C+EOt3SNNM+BBAFCACOR2KJjuGBex5626c/8V7Vdl69eOOf9VvbFtkwGim9WbVtAHjiyPh//sR7kdXEjHJncW4lteAQQAyrhLXtXCaf3e5p61S7Eh29nd2dgzvah2J8zMs3lq5c3rqzWKrWG77OqgjZDgjC3vd/1GimK9tbiZ1H3Nw6iMDI/ZWl2frKTcvKOnqeVwWEWeDIPRO7n3/xTCZX+qfJx6CUE/juSPjYvqHHDiR/4/j9EIuQQjHRoo3uHLn1o2kfZpouqXpOOBQhlp+PFoOtcd+upApb5YyZULt6BgYdzu1HjtARrXk1RQ7WK/W20UFAofLsq6GeAYAA6EWuqw1AWLsxOXX5tSMTD4wemADP8zzMmDqicHT/yPXpxbccJGH45p88+Z3vfuU9x0YG9yaBZYBBoKrUo6Fo9M7s1fnZUi6fohwfjsSIy2lKqLWrHTFsrlS5MnNjKrdSkJ1gT1vrQH9Xb3ssEiIeUy7WBsd3SyEuNTnZtnMYcwzZXmbaOozs9g/+/E8FzLfGu9xijkEmDcZYPgTlRqVa8DwS12KpbJYCMACQzxcef8eEsGMnNGsUiwQzIIrAcaqsdHX0TF+7klp3edV0/CamcjiktcdVJhSOtXZHRJVwAhsUtVDQMW3HbHiELK+mbl29MX7wkJpQVmbmu4eSeq30o6f+crAjKoZaL51
6OZXfdH2IKGLbkXEh1Is4GSHXdY1b567WGv5GMQ+EMCzLpjL57z370sTBA117HkGKDxWLADDRIPFoRI5E4m03rl5KbZoGykk8p2mBUiUXYCVZCtqOjxkuEg5zLItZdiuXX9lcn5yc+vaJM/GgenDP3q217UBIDLT3fOkzf7gwdfvY+96vBRI5veEa1eG+rpa9+4ALALDAgVmtn79w89b8FisIkUCAwRgTQhqm9e2n/4Ga+sTEvXx7D8Y+cQiOqmD7sXA83huau3F7btqvoRqhiBKlYjvlhrmRz20WNijD5HK1uZWbs6uzz710eT3V0Hlt5c7tT33wCZ7wSwtz3XsONzO5xampYxNHJCVqmH6zlu1pDSZ2DFApiEAE5CPDmLmzAASN9LfuHmx/k9bPhXbmwuUfPP1D5Dm7BncIbYPASkjlgKBEuK0/mUhtLF+eqS8v5zzL9HFjcXOjWK8rslgqlG/MzLx+9o3XLszNbzZ04LRgMJXe/Nxvf5BUGhdOvbr37Q8mRCGzMsPyvGV6iFcYzxob7pF6O5AQoiAjBAKQW9dvXbhwJSzrvlP6Z7QAoNponnrt3HeePpnf3EqIXGtvkon0SiwvMELnzphs5lcXSpObxVvTqzeXVuZX11fW1+fuTJ+5Mju9pusWAIBtm5VqyaFQS904e/LvGIru/cCHjdSm4dlSJCAqku84gtsc6u9EEY3KUYAwohS53vydhe/98PnsVnkjXflHWuitPts0zQuTU3/19A8nX59s5NMBVYy3dPa0Dyf7e6Ihm9XLhaxd0UmlbKW3aqvbes0kgMWAojmO+eYkoygWLQyNJjuTfZXZW2XbHTxyz8joztZgUK9sdUQCQS1MNA2pLQgkAIfqte18bnHxcrZhi7KEWJb1vLuN3hgf3jt634GJZLJHZJukuVxaX8wsZ5fmizeKJP3z2xeEMca+/+an9uzf87+/9bXzJ57tjgUZz9+5ZyIUaZUwlRXs17axUUcBBdp7INYPOAhWvZlLT96aOnvhnGm7iiCyv84puU/IxeszF6/PAEAsEu5oi8sigibK2GjrF3soJb7/T2JYvLOgqO3DYwdlu3zvkXvAg3pBB4GHkMhoMcBANRGAIs8CXgUPOSZwgixJmkvqDub+pbbuCsO0coXSVqa0VdKrzr86tHuud+naNcwxMUyTLWHMghgIMCKPFIHaBjCIRkLASUhQAfNgOrVaEQRhPbU9uzyPOIahlML/H2S20ucvXM2ks/eO9Ye0AIOA2Cb2bERMhAHkADAqsAryXNqoWnZTCAXqzeby4qLEi4woipRShmHwvw3MW9a/AoQIhfV8eX1zuzvZZeuG3TQdQ3dtCwPHMCJGLKKU2o5vmJwoEl40LW/hzrypm/8XZCy0eCnDy+0AAAAASUVORK5CYII=" ) ================================================ FILE: server/stats.go ================================================ package server import ( "fmt" "net" "time" "github.com/PagerDuty/godspeed" ) type RuntimeStats interface { LogStartup() Request(url string) ResponseTime(elapsed time.Duration, url string) Thumbnail(name string) Upload(source string) Error(code int) } type DiscardStats struct{} func (d *DiscardStats) LogStartup() {} func (d *DiscardStats) Request(url string) {} func (d *DiscardStats) ResponseTime(elapsed time.Duration, url string) {} func (d *DiscardStats) Thumbnail(name string) {} func (d *DiscardStats) Upload(source string) {} func (d *DiscardStats) Error(code int) {} type DatadogStats struct { dog *godspeed.Godspeed } func NewDatadogStats(datadogHost string) (*DatadogStats, error) { var ip net.IP = nil var err error = nil // Assume datadogHost is an IP and try to parse it ip = net.ParseIP(datadogHost) // Parsing failed if ip == nil { ips, _ := net.LookupIP(datadogHost) if len(ips) > 0 { ip = ips[0] } } if ip != nil { gdsp, err := godspeed.New(ip.String(), godspeed.DefaultPort, false) if err == nil { return &DatadogStats{gdsp}, nil } } return nil, err } func (d *DatadogStats) LogStartup() { d.dog.Incr("mandible.startup", nil) } func (d *DatadogStats) Request(url string) { tag := fmt.Sprintf("url:%s", url) d.dog.Incr("mandible.request", []string{tag}) } func (d *DatadogStats) ResponseTime(elapsed time.Duration, url string) { time := elapsed.Seconds() tag := fmt.Sprintf("url:%s", url) d.dog.Timing("mandible.responseTime", time, []string{tag}) } func (d *DatadogStats) Thumbnail(name string) { tag := fmt.Sprintf("size:%s", name) d.dog.Incr("mandible.thumbnail", []string{tag}) } func (d *DatadogStats) Upload(source string) { tag := fmt.Sprintf("source:%s", source) d.dog.Incr("mandible.upload", []string{tag}) } func (d *DatadogStats) Error(code int) { tag := fmt.Sprintf("code:%d", code) d.dog.Incr("mandible.error", []string{tag}) } ================================================ FILE: uploadedfile/thumbfile.go ================================================ package uploadedfile import ( 
"errors" "fmt" "math" "os" "regexp" "strconv" "github.com/Imgur/mandible/imageprocessor/processorcommand" "github.com/Imgur/mandible/imageprocessor/thumbType" ) var ( defaultQuality = 83 maxImageSideSize = 10000 ) type ThumbFile struct { localPath string Name string Width int MaxWidth int Height int MaxHeight int Shape string CropGravity string CropWidth int CropHeight int CropRatio string Quality int Format string StoreURI string DesiredFormat string NoStore bool } func NewThumbFile(width, maxWidth, height, maxHeight int, name, shape, path, cropGravity string, cropWidth, cropHeight int, cropRatio string, quality int, desiredFormat string, noStore bool) *ThumbFile { if quality == 0 { quality = defaultQuality } return &ThumbFile{ localPath: path, Name: name, Width: width, MaxWidth: maxWidth, Height: height, MaxHeight: maxHeight, Shape: shape, CropGravity: cropGravity, CropWidth: cropWidth, CropHeight: cropHeight, CropRatio: cropRatio, Quality: quality, Format: "", StoreURI: "", DesiredFormat: desiredFormat, NoStore: noStore, } } func (this *ThumbFile) GetNoStore() bool { return this.NoStore } func (this *ThumbFile) SetPath(path string) error { if _, err := os.Stat(path); os.IsNotExist(err) { return errors.New(fmt.Sprintf("Error when creating thumbnail %s", this.Name)) } this.localPath = path return nil } func (this *ThumbFile) GetPath() string { return this.localPath } func (this *ThumbFile) GetOutputFormat(original *UploadedFile) thumbType.ThumbType { if this.DesiredFormat != "" { return thumbType.FromString(this.DesiredFormat) } return thumbType.FromMime(original.GetMime()) } func (this *ThumbFile) ComputeWidth(original *UploadedFile) int { width := this.Width oWidth, _, err := original.Dimensions() if err != nil { return 0 } if this.MaxWidth > 0 { width = int(math.Min(float64(oWidth), float64(this.MaxWidth))) } return width } func (this *ThumbFile) ComputeHeight(original *UploadedFile) int { height := this.Height _, oHeight, err := original.Dimensions() if err != nil { return 0 } if this.MaxHeight > 0 { height = int(math.Min(float64(oHeight), float64(this.MaxHeight))) } return height } func (this *ThumbFile) ComputeCrop(original *UploadedFile) (int, int, error) { re := regexp.MustCompile("(.*):(.*)") matches := re.FindStringSubmatch(this.CropRatio) if len(matches) != 3 { return 0, 0, errors.New("Invalid crop_ratio") } wRatio, werr := strconv.ParseFloat(matches[1], 64) hRatio, herr := strconv.ParseFloat(matches[2], 64) if werr != nil || herr != nil { return 0, 0, errors.New("Invalid crop_ratio") } var cropWidth, cropHeight float64 if wRatio >= hRatio { wRatio = wRatio / hRatio hRatio = 1 cropWidth = math.Ceil(float64(this.ComputeHeight(original)) * wRatio) cropHeight = math.Ceil(float64(this.ComputeHeight(original)) * hRatio) } else { hRatio = hRatio / wRatio wRatio = 1 cropWidth = math.Ceil(float64(this.ComputeWidth(original)) * wRatio) cropHeight = math.Ceil(float64(this.ComputeWidth(original)) * hRatio) } return int(cropWidth), int(cropHeight), nil } func (this *ThumbFile) Process(original *UploadedFile) error { switch this.Shape { case "circle": return this.processCircle(original) case "thumb": return this.processThumb(original) case "square": return this.processSquare(original) case "custom": return this.processCustom(original) default: return this.processFull(original) } } func (this *ThumbFile) String() string { return fmt.Sprintf("Thumbnail of <%s>", this.Name) } func (this *ThumbFile) processSquare(original *UploadedFile) error { if this.Width == 0 { return errors.New("Width 
cannot be 0") } if this.Width > maxImageSideSize { return errors.New("Width too large") } filename, err := processorcommand.SquareThumb(original.GetPath(), this.Name, this.Width, this.Quality, this.GetOutputFormat(original)) if err != nil { return err } if err := this.SetPath(filename); err != nil { return err } return nil } func (this *ThumbFile) processCircle(original *UploadedFile) error { if this.Width == 0 { return errors.New("Width cannot be 0") } if this.Width > maxImageSideSize { return errors.New("Width too large") } //Circle thumbs should always be PNGs outputFormat := thumbType.FromString("png") filename, err := processorcommand.CircleThumb(original.GetPath(), this.Name, this.Width, this.Quality, outputFormat) if err != nil { return err } if err := this.SetPath(filename); err != nil { return err } return nil } func (this *ThumbFile) processThumb(original *UploadedFile) error { if this.Width == 0 { return errors.New("Width cannot be 0") } if this.Width > maxImageSideSize { return errors.New("Width too large") } if this.Height == 0 { return errors.New("Height cannot be 0") } if this.Height > maxImageSideSize { return errors.New("Height too large") } filename, err := processorcommand.Thumb(original.GetPath(), this.Name, this.Width, this.Height, this.Quality, this.GetOutputFormat(original)) if err != nil { return err } if err := this.SetPath(filename); err != nil { return err } return nil } func (this *ThumbFile) processCustom(original *UploadedFile) error { cropWidth := this.CropWidth cropHeight := this.CropHeight var err error if this.CropRatio != "" { cropWidth, cropHeight, err = this.ComputeCrop(original) if err != nil { return err } } width := this.ComputeWidth(original) height := this.ComputeHeight(original) validWidth := width > 0 && width <= maxImageSideSize validHeight := height > 0 && height <= maxImageSideSize if !validWidth && !validHeight { if !validWidth { return errors.New("Invalid width") } return errors.New("Invalid height") } filename, err := processorcommand.CustomThumb(original.GetPath(), this.Name, width, height, this.CropGravity, cropWidth, cropHeight, this.Quality, this.GetOutputFormat(original)) if err != nil { return err } if err := this.SetPath(filename); err != nil { return err } return nil } func (this *ThumbFile) processFull(original *UploadedFile) error { filename, err := processorcommand.Full(original.GetPath(), this.Name, this.Quality, this.GetOutputFormat(original)) if err != nil { return err } if err := this.SetPath(filename); err != nil { return err } return nil } ================================================ FILE: uploadedfile/uploadedfile.go ================================================ package uploadedfile import ( "errors" "image" "image/gif" "image/jpeg" "image/png" "net/http" "os" ) type UploadedFile struct { filename string path string mime string hash string ocrText string thumbs []*ThumbFile } var supportedTypes = map[string]bool{ "image/jpeg": true, "image/jpg": true, "image/gif": true, "image/png": true, } func NewUploadedFile(filename, path string, thumbs []*ThumbFile) (*UploadedFile, error) { file, err := os.Open(path) if err != nil { return nil, err } buff := make([]byte, 512) // http://golang.org/pkg/net/http/#DetectContentType _, err = file.Read(buff) if err != nil { return nil, err } filetype := http.DetectContentType(buff) if _, ok := supportedTypes[filetype]; !ok { return nil, errors.New("Unsupported file type!") } return &UploadedFile{ filename, path, filetype, "", "", thumbs, }, nil } func (this *UploadedFile) 
GetFilename() string { return this.filename } func (this *UploadedFile) SetFilename(filename string) { this.filename = filename } func (this *UploadedFile) GetHash() string { return this.hash } func (this *UploadedFile) SetHash(hash string) { this.hash = hash } func (this *UploadedFile) GetOCRText() string { return this.ocrText } func (this *UploadedFile) SetOCRText(text string) { this.ocrText = text } func (this *UploadedFile) SetPath(path string) { // TODO: find a better location for this os.Remove(this.path) this.path = path } func (this *UploadedFile) GetPath() string { return this.path } func (this *UploadedFile) GetMime() string { return this.mime } func (this *UploadedFile) SetMime(mime string) { this.mime = mime } func (this *UploadedFile) SetThumbs(thumbs []*ThumbFile) { this.thumbs = thumbs } func (this *UploadedFile) GetThumbs() []*ThumbFile { return this.thumbs } func (this *UploadedFile) FileSize() (int64, error) { f, err := os.Open(this.path) if err != nil { return 0, err } stats, err := f.Stat() if err != nil { return 0, err } size := stats.Size() return size, nil } func (this *UploadedFile) Clean() { os.Remove(this.path) for _, thumb := range this.thumbs { os.Remove(thumb.GetPath()) } } func (this *UploadedFile) Dimensions() (int, int, error) { f, err := os.Open(this.path) if err != nil { return 0, 0, err } var cfg image.Config switch true { case this.IsGif(): cfg, err = gif.DecodeConfig(f) case this.IsPng(): cfg, err = png.DecodeConfig(f) case this.IsJpeg(): cfg, err = jpeg.DecodeConfig(f) default: return 0, 0, errors.New("Invalid mime type!") } if err != nil { return 0, 0, err } return cfg.Width, cfg.Height, nil } func (this *UploadedFile) IsJpeg() bool { return (this.GetMime() == "image/jpeg" || this.GetMime() == "image/jpg") } func (this *UploadedFile) IsPng() bool { return this.GetMime() == "image/png" } func (this *UploadedFile) IsGif() bool { return this.GetMime() == "image/gif" } ================================================ FILE: vendor/github.com/PagerDuty/godspeed/.gitignore ================================================ # Misc *.swp # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof ================================================ FILE: vendor/github.com/PagerDuty/godspeed/.travis.yml ================================================ language: go go: - 1.5.3 branches: only: - master script: go test -v ./... -check.vv sudo: false ================================================ FILE: vendor/github.com/PagerDuty/godspeed/LICENSE ================================================ Copyright (c) 2014-2015, PagerDuty Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of PagerDuty nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PagerDuty OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/PagerDuty/godspeed/README.md ================================================ # Godspeed [![TravisCI Build Status](https://img.shields.io/travis/PagerDuty/godspeed/master.svg?style=flat)](https://travis-ci.org/PagerDuty/godspeed) [![GoDoc](https://img.shields.io/badge/godspeed-GoDoc-blue.svg?style=flat)](https://godoc.org/github.com/PagerDuty/godspeed) [![License](https://img.shields.io/badge/License-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/PagerDuty/godspeed/blob/master/LICENSE) Godspeed is a statsd client for the Datadog extension of statsd (DogStatsD). The name `godspeed` is a bit of a rhyming slang twist on DogStatsD. It's also a poke at the fact that the statsd protocol's transport mechanism is UDP... Check out [GoDoc](https://godoc.org/github.com/PagerDuty/godspeed) for the docs as well as some examples. DogStatsD is a copyright of `Datadog `. ## License Godspeed is released under the BSD 3-Clause License. See the `LICENSE` file for the full contents of the license. ## Installation ``` go get -u github.com/PagerDuty/godspeed ``` ## Usage For more details either look at the `_example_test.go` files directly or view the examples on [GoDoc](https://godoc.org/github.com/PagerDuty/godspeed#pkg-examples). ### Emitting a gauge ```Go g, err := godspeed.NewDefault() if err != nil { // handle error } defer g.Conn.Close() err = g.Gauge("example.stat", 1, nil) if err != nil { // handle error } ``` ### Emitting an event ```Go // make sure to handle the error g, _ := godspeed.NewDefault() defer g.Conn.Close() title := "Nginx service restart" text := "The Nginx service has been restarted" // the optionals are for the optional arguments available for an event // http://docs.datadoghq.com/guides/dogstatsd/#fields optionals := make(map[string]string) optionals["alert_type"] = "info" optionals["source_type_name"] = "nginx" addlTags := []string{"source_type:nginx"} err := g.Event(title, text, optionals, addlTags) if err != nil { fmt.Println("err:", err) } ``` ================================================ FILE: vendor/github.com/PagerDuty/godspeed/async.go ================================================ // Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. package godspeed import "sync" // AsyncGodspeed is used for asynchronous Godspeed calls. // The AsyncGodspeed emission methods have an additional argument // for a *sync.WaitGroup to have the method indicate when finished. 
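//
// A minimal usage sketch, assuming a DogStatsD agent listening on the
// default host and port (as in the README examples); error handling elided:
//
//	a, err := NewDefaultAsync()
//	if err != nil {
//		// handle error
//	}
//	defer a.Godspeed.Conn.Close()
//
//	a.W.Add(1)
//	go a.Incr("example.counter", nil, a.W)
//	a.W.Wait()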
type AsyncGodspeed struct { // Godspeed is an instance of Godspeed Godspeed *Godspeed // W is a *sync.WaitGroup used for blocking application execution // when you want to wait for stats to be emitted. // This is here as a convenience, and you can use your own WaitGroup // in any AsyncGodspeed method calls. W *sync.WaitGroup } // NewAsync returns an instance of AsyncGodspeed. This is the more async-friendly version of Godspeed // autoTruncate dictactes whether long stats emissions get auto-truncated or dropped. Unfortunately, // Events will always be dropped. If you need monitor your events, you can access the Godspeed instance // directly. func NewAsync(host string, port int, autoTruncate bool) (a *AsyncGodspeed, err error) { gs, err := New(host, port, autoTruncate) if err != nil { return nil, err } a = &AsyncGodspeed{ Godspeed: gs, W: new(sync.WaitGroup), } return } // NewDefaultAsync is just like NewAsync except it uses the DefaultHost and DefaultPort func NewDefaultAsync() (a *AsyncGodspeed, err error) { a, err = NewAsync(DefaultHost, DefaultPort, false) return } // AddTag is identical to that within the Godspeed client func (a *AsyncGodspeed) AddTag(tag string) []string { return a.Godspeed.AddTag(tag) } // AddTags is identical to that within the Godspeed client func (a *AsyncGodspeed) AddTags(tags []string) []string { return a.Godspeed.AddTags(tags) } // SetNamespace is identical to that within the Godspeed client func (a *AsyncGodspeed) SetNamespace(ns string) { a.Godspeed.SetNamespace(ns) } // Event is almost identical to that within the Godspeed client // The only chnage is that it has no return value, and takes a // (sync.WaitGroup) argument func (a *AsyncGodspeed) Event(title, body string, keys map[string]string, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Event(title, body, keys, tags) } // Send is almost identical to that within the Godspeed client // with the addition of an argument and removal of the return value func (a *AsyncGodspeed) Send(stat, kind string, delta, sampleRate float64, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Send(stat, kind, delta, sampleRate, tags) } // ServiceCheck is almost identical to that within the Godspeed client // with the addition of an argument and removal of the return value func (a *AsyncGodspeed) ServiceCheck(name string, status int, fields map[string]string, tags []string, y *sync.WaitGroup) { if y != nil { defer y.Done() } a.Godspeed.ServiceCheck(name, status, fields, tags) } // Count is almost identical to that within the Godspeed client // As with the other AsyncGodpseed functions it omits a return value and // takes a *sync.WaitGroup instance func (a *AsyncGodspeed) Count(stat string, count float64, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Count(stat, count, tags) } // Incr is almost identical to that within the Godspeed client, // except it has no return value and takes a *sync.WaitGroup argument. func (a *AsyncGodspeed) Incr(stat string, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Incr(stat, tags) } // Decr is almost identical to that within the Godspeed client. It has // no return value and takes a *sync.WaitGroup argument. // // Also, I've gotten tired of typing "Xxx is almost identical to that within..." so congrats // on making it this far in to the docs. func (a *AsyncGodspeed) Decr(stat string, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Decr(stat, tags) } // Gauge is almost identical to that within the Godspeed client. 
// Here it has no return value, and takes a *sync.WaitGroup argument func (a *AsyncGodspeed) Gauge(stat string, value float64, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Gauge(stat, value, tags) } // Histogram is almost identical to that within the Godspeed client. // Within AsyncGodspeed it has no return value, and also takes a *sync.WaitGroup argument func (a *AsyncGodspeed) Histogram(stat string, value float64, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Histogram(stat, value, tags) } // Timing is almost identical to that within the Godspeed client. // The return value is removed, and it takes a *sync.WaitGroup argument here func (a *AsyncGodspeed) Timing(stat string, value float64, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Timing(stat, value, tags) } // Set is almost identical to that within the Godspeed client func (a *AsyncGodspeed) Set(stat string, value float64, tags []string, y *sync.WaitGroup) { defer y.Done() a.Godspeed.Set(stat, value, tags) } ================================================ FILE: vendor/github.com/PagerDuty/godspeed/events.go ================================================ // Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. package godspeed import ( "bytes" "fmt" "strings" ) var eventKeys = []string{"date_happened", "hostname", "aggregation_key", "priority", "source_type_name", "alert_type"} var eventMarkers = []rune{'d', 'h', 'k', 'p', 's', 't'} func escapeEvent(s string) string { return strings.NewReplacer("\n", "\\n").Replace(s) } func removePipes(s string) string { return strings.Replace(s, "|", "", -1) } // Event is the function for submitting a Datadog event. // This is a Datadog-specific emission and most likely will not work on other statsd implementations. // title and body are both strings, and are the title and body of the event respectively. // field can be used to send the optional keys. func (g *Godspeed) Event(title, text string, fields map[string]string, tags []string) error { if len(title) < 1 { return fmt.Errorf("title must have at least one character") } if len(text) < 1 { return fmt.Errorf("body must have at least one character") } var buf bytes.Buffer title = escapeEvent(title) text = escapeEvent(text) buf.WriteString(fmt.Sprintf("_e{%d,%d}:%v|%v", len(title), len(text), title, text)) // if some fields were passed in convert them to their proper format // and write that to the buffer if len(fields) > 0 { for i, v := range eventKeys { if mv, ok := fields[v]; ok { buf.WriteString(fmt.Sprintf("|%v:%v", string(eventMarkers[i]), removePipes(mv))) } } } tags = uniqueTags(append(g.Tags, tags...)) if len(tags) > 0 { for i, v := range tags { tags[i] = strings.Replace(v, "|", "", -1) } buf.WriteString(fmt.Sprintf("|#%v", strings.Join(tags, ","))) } // this handles the logic for truncation // if the buffer length is larger than the max, return an error // else just write it if bufLen := buf.Len(); bufLen > MaxBytes { return fmt.Errorf("error sending %v, packet larger than %d (%d)", string(title), MaxBytes, buf.Len()) } _, err := g.Conn.Write(buf.Bytes()) return err } ================================================ FILE: vendor/github.com/PagerDuty/godspeed/godspeed.go ================================================ // Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved. 
// Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. // Package godspeed is a statsd client for the Datadog extension of statsd // called DogStatsD. It can be used to emit statsd stats, Datadog-specific // events, and DogStatsD service checks. This client also has the ability to // tag all outgoing statsd metrics. Godspeed is meant for synchronous calls, // while AsyncGodspeed is used for what it says on the tin. // // The name godspeed is a bit of a rhyming slang twist on DogStatsD. It's // also a poke at the fact that the statsd protocol's transport mechanism // is UDP. // // DogStatsD is a copyright of Datadog package godspeed import ( "fmt" "net" ) const ( // DefaultHost is 127.0.0.1 (localhost) DefaultHost = "127.0.0.1" // DefaultPort is 8125 DefaultPort = 8125 // MaxBytes is the largest UDP datagram we will try to send MaxBytes = 8192 ) // Godspeed is an unbuffered Statsd client with compatibility geared towards the Datadog statsd format // It consists of Conn (*net.UDPConn) object for sending metrics over UDP, // Namespace (string) for namespacing metrics, and Tags ([]string) for tags to send with stats type Godspeed struct { // Conn is the UDP connection used for sending the statsd emissions Conn *net.UDPConn // Namespace is the namespace all stats emissions are prefixed with: // . Namespace string // Tags is the slice of tags to append to each stat emission Tags []string // AutoTruncate specifies whether or not we will try to truncate a stat // before emitting it or just return an error. This is most helpful when // using AsyncGodspeed. However, it can result in invalid stat being emitted // due to the body being truncated. Meant for when a single emission would // be greater than 8192 bytes. AutoTruncate bool } // New returns a new instance of a Godspeed statsd client. // This method takes the host as a string, and port as an int. // There is also the ability for autoTruncate. If your metric is longer than MaxBytes // autoTruncate can be used to truncate the message instead of erroring. This doesn't work // on events and will always return an error. func New(host string, port int, autoTruncate bool) (g *Godspeed, err error) { // build a new UDP dialer addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", host, port)) if err != nil { return nil, err } c, err := net.DialUDP("udp", nil, addr) // if it failed return a pointer to an empty Godspeed struct, and the error if err != nil { return nil, err } // build a new Godspeed struct with the UDPConn g = &Godspeed{ Conn: c, Tags: make([]string, 0), AutoTruncate: autoTruncate, } return } // NewDefault is the same as New() except it uses DefaultHost and DefaultPort for the connection. func NewDefault() (g *Godspeed, err error) { g, err = New(DefaultHost, DefaultPort, false) return } // AddTag allows you to add a tag for all future emitted stats. 
// It takes the tag as a string, and returns a []string containing all Godspeed tags func (g *Godspeed) AddTag(tag string) []string { // return early if the tag already exists for _, v := range g.Tags { if tag == v { return g.Tags } } // add the tag g.Tags = append(g.Tags, tag) return g.Tags } // AddTags is like AddTag(), except it tages a []string and adds each contained string // This also returns a []string containing the current tags func (g *Godspeed) AddTags(tags []string) []string { // if we already have tags add each tag one at a time // otherwise unique the list and assign it directly if len(g.Tags) > 0 { for _, tag := range tags { g.AddTag(tag) } } else { g.Tags = uniqueTags(tags) } return g.Tags } // SetNamespace allows you to prefix all of your metrics with a certain namespace func (g *Godspeed) SetNamespace(ns string) { g.Namespace = trimReserved(ns) } ================================================ FILE: vendor/github.com/PagerDuty/godspeed/service_checks.go ================================================ // Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. package godspeed import ( "bytes" "fmt" "strings" ) var scKeys = []string{"service_check_message", "timestamp", "hostname"} var scMark = []string{"m", "d", "h"} // ServiceCheck is a function to emit DogStatsD service checks // to the local DD agent. It takes the name of the service, // which must NOT contain a pipe (|) character, and the numeric // status for the service. The status values are the same as Nagios: // // OK = 0, WARNING = 1, CRITICAL = 2, UNKNOWN = 3 // // This functionality is an extension to the statsd // protocol by Datadog (DogStatsD): // // http://docs.datadoghq.com/guides/dogstatsd/#service-checks func (g *Godspeed) ServiceCheck(name string, status int, fields map[string]string, tags []string) error { if len(name) == 0 { return fmt.Errorf("service name must have at least one character") } if status < 0 || status > 3 { return fmt.Errorf("unknown service status (%d); known values: 0,1,2,3", status) } if strings.ContainsAny("|", name) { return fmt.Errorf("service name '%s' may not include pipe character ('|')", name) } var buf bytes.Buffer buf.WriteString(fmt.Sprintf("_sc|%s|%d", name, status)) if len(fields) > 0 { for i, v := range scKeys { if mv, ok := fields[v]; ok { buf.WriteString(fmt.Sprintf("|%s:%s", scMark[i], removePipes(mv))) } } } tags = uniqueTags(append(g.Tags, tags...)) if len(tags) > 0 { for i, v := range tags { tags[i] = strings.Replace(v, "|", "", -1) } buf.WriteString(fmt.Sprintf("|#%s", strings.Join(tags, ","))) } if bufLen := buf.Len(); bufLen > MaxBytes { return fmt.Errorf("error sending %s service check, packet larger than %d (%d)", name, MaxBytes, bufLen) } _, err := g.Conn.Write(buf.Bytes()) return err } ================================================ FILE: vendor/github.com/PagerDuty/godspeed/shared.go ================================================ // Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. 
package godspeed import "strings" // stats names can't include :, |, or @ func trimReserved(s string) string { return strings.NewReplacer(":", "_", "|", "_", "@", "_").Replace(s) } // function to make sure tags are unique func uniqueTags(t []string) []string { // if the tag slice is empty avoid allocation if len(t) < 1 { return nil } // build a map to track which values we've seen s := make(map[string]bool) // loop over each string provided // if the value is not in the map then replace // the value at t[len(s)] so that we always have // only unique tags at the beginning of the slice for i, v := range t { if _, x := s[v]; !x { // only change the value if needed if i != len(s) { t[len(s)] = v } s[v] = true } } // based on the size of the map we know // how many unique tags there were // so return that slice return []string(t[:len(s)]) } ================================================ FILE: vendor/github.com/PagerDuty/godspeed/stats.go ================================================ // Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. package godspeed import ( "bytes" "fmt" "math/rand" "strconv" "strings" ) // Send is the function for emitting the metrics to statsd // It takes the name of the stat as a string, as well as the kind. // The kind is "g" for gauge, "c" for count, "ms" for timing, etc. // This returns any error hit during the flushing of the stat func (g *Godspeed) Send(stat, kind string, delta, sampleRate float64, tags []string) (err error) { // if the connection hasn't been set up yet if g.Conn == nil { return fmt.Errorf("socket not created") } // return if the sample rate is less than 1 and the random number is less than the sample rate if sampleRate < 1 && rand.Float64() >= sampleRate { return nil } var buffer bytes.Buffer // if we have a namespace write it to the byte buffer if len(g.Namespace) > 0 { buffer.WriteString(fmt.Sprintf("%v.", g.Namespace)) } floatStr := strconv.FormatFloat(delta, 'f', -1, 64) // write the name of the metric to the byte buffer as well as the metric itself buffer.WriteString(fmt.Sprintf("%v:%v|%v", string(trimReserved(stat)), floatStr, kind)) // if the sample rate is less than 1 add it too if sampleRate < 1 { floatStr = strconv.FormatFloat(sampleRate, 'f', -1, 64) buffer.WriteString(fmt.Sprintf("|@%v", floatStr)) } // add any provided tags to the metric tags = uniqueTags(append(g.Tags, tags...)) if len(tags) > 0 { buffer.WriteString(fmt.Sprintf("|#%v", strings.Join(tags, ","))) } // this handles the logic for truncation // if the buffer length is smaller than the max, just write it // else if AutoTruncate is enabled truncate/write the bytes // else generate an error to return if buffer.Len() <= MaxBytes { _, err = g.Conn.Write(buffer.Bytes()) } else if g.AutoTruncate { _, err = g.Conn.Write(buffer.Bytes()[0:MaxBytes]) } else { err = fmt.Errorf("error sending %v, packet larger than %d (%d)", stat, MaxBytes, buffer.Len()) } return } // Count wraps Send() and simplifies the interface for Count stats func (g *Godspeed) Count(stat string, count float64, tags []string) error { return g.Send(stat, "c", count, 1, append(g.Tags, tags...)) } // Incr wraps Send() and simplifies the interface for incrementing a counter // It only takes the name of the stat, and tags func (g *Godspeed) Incr(stat string, tags []string) error { return g.Count(stat, 1, append(g.Tags, tags...)) } // Decr wraps Send() and simplifies the interface for 
decrementing a counter // It only takes the name of the stat, and tags func (g *Godspeed) Decr(stat string, tags []string) error { return g.Count(stat, -1, append(g.Tags, tags...)) } // Gauge wraps Send() and simplifies the interface for Gauge stats func (g *Godspeed) Gauge(stat string, value float64, tags []string) error { return g.Send(stat, "g", value, 1, append(g.Tags, tags...)) } // Histogram wraps Send() and simplifies the interface for Histogram stats func (g *Godspeed) Histogram(stat string, value float64, tags []string) error { return g.Send(stat, "h", value, 1, append(g.Tags, tags...)) } // Timing wraps Send() and simplifies the interface for Timing stats func (g *Godspeed) Timing(stat string, value float64, tags []string) error { return g.Send(stat, "ms", value, 1, append(g.Tags, tags...)) } // Set wraps Send() and simplifies the interface for Timing stats func (g *Godspeed) Set(stat string, value float64, tags []string) error { return g.Send(stat, "s", value, 1, append(g.Tags, tags...)) } ================================================ FILE: vendor/github.com/bradfitz/http2/.gitignore ================================================ *~ h2i/h2i ================================================ FILE: vendor/github.com/bradfitz/http2/AUTHORS ================================================ # This file is like Go's AUTHORS file: it lists Copyright holders. # The list of humans who have contributd is in the CONTRIBUTORS file. # # To contribute to this project, because it will eventually be folded # back in to Go itself, you need to submit a CLA: # # http://golang.org/doc/contribute.html#copyright # # Then you get added to CONTRIBUTORS and you or your company get added # to the AUTHORS file. Blake Mizerany github=bmizerany Daniel Morsing github=DanielMorsing Gabriel Aszalos github=gbbr Google, Inc. Keith Rarick github=kr Matthew Keenan github=mattkeenan Matt Layher github=mdlayher Perry Abbott github=pabbott0 Tatsuhiro Tsujikawa github=tatsuhiro-t ================================================ FILE: vendor/github.com/bradfitz/http2/CONTRIBUTORS ================================================ # This file is like Go's CONTRIBUTORS file: it lists humans. # The list of copyright holders (which may be companies) are in the AUTHORS file. # # To contribute to this project, because it will eventually be folded # back in to Go itself, you need to submit a CLA: # # http://golang.org/doc/contribute.html#copyright # # Then you get added to CONTRIBUTORS and you or your company get added # to the AUTHORS file. Blake Mizerany github=bmizerany Brad Fitzpatrick github=bradfitz Daniel Morsing github=DanielMorsing Gabriel Aszalos github=gbbr Keith Rarick github=kr Matthew Keenan github=mattkeenan Matt Layher github=mdlayher Perry Abbott github=pabbott0 Tatsuhiro Tsujikawa github=tatsuhiro-t ================================================ FILE: vendor/github.com/bradfitz/http2/Dockerfile ================================================ # # This Dockerfile builds a recent curl with HTTP/2 client support, using # a recent nghttp2 build. # # See the Makefile for how to tag it. If Docker and that image is found, the # Go tests use this curl binary for integration tests. 
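#
# Rough usage sketch, assuming the image is tagged gohttp2/curl as in the
# Makefile's curlimage target (the demo server URL is the one from the README):
#
#   docker build -t gohttp2/curl .
#   docker run --rm gohttp2/curl -v --http2 https://http2.golang.org/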
# FROM ubuntu:trusty RUN apt-get update && \ apt-get upgrade -y && \ apt-get install -y git-core build-essential wget RUN apt-get install -y --no-install-recommends \ autotools-dev libtool pkg-config zlib1g-dev \ libcunit1-dev libssl-dev libxml2-dev libevent-dev \ automake autoconf # Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: ENV NGHTTP2_VER af24f8394e43f4 RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git WORKDIR /root/nghttp2 RUN git reset --hard $NGHTTP2_VER RUN autoreconf -i RUN automake RUN autoconf RUN ./configure RUN make RUN make install WORKDIR /root RUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz RUN tar -zxvf curl-7.40.0.tar.gz WORKDIR /root/curl-7.40.0 RUN ./configure --with-ssl --with-nghttp2=/usr/local RUN make RUN make install RUN ldconfig CMD ["-h"] ENTRYPOINT ["/usr/local/bin/curl"] ================================================ FILE: vendor/github.com/bradfitz/http2/HACKING ================================================ We only accept contributions from users who have gone through Go's contribution process (signed a CLA). Please acknowledge whether you have (and use the same email) if sending a pull request. ================================================ FILE: vendor/github.com/bradfitz/http2/LICENSE ================================================ Copyright 2014 Google & the Go AUTHORS Go AUTHORS are: See https://code.google.com/p/go/source/browse/AUTHORS Licensed under the terms of Go itself: https://code.google.com/p/go/source/browse/LICENSE ================================================ FILE: vendor/github.com/bradfitz/http2/Makefile ================================================ curlimage: docker build -t gohttp2/curl . ================================================ FILE: vendor/github.com/bradfitz/http2/README ================================================ This is a work-in-progress HTTP/2 implementation for Go. It will eventually live in the Go standard library and won't require any changes to your code to use. It will just be automatic. Status: * The server support is pretty good. A few things are missing but are being worked on. * The client work has just started but shares a lot of code is coming along much quicker. Docs are at https://godoc.org/github.com/bradfitz/http2 Demo test server at https://http2.golang.org/ Help & bug reports welcome. ================================================ FILE: vendor/github.com/bradfitz/http2/buffer.go ================================================ // Copyright 2014 The Go Authors. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package http2 import ( "errors" ) // buffer is an io.ReadWriteCloser backed by a fixed size buffer. // It never allocates, but moves old data as new data is written. type buffer struct { buf []byte r, w int closed bool err error // err to return to reader } var ( errReadEmpty = errors.New("read from empty buffer") errWriteClosed = errors.New("write on closed buffer") errWriteFull = errors.New("write on full buffer") ) // Read copies bytes from the buffer into p. // It is an error to read when no data is available. func (b *buffer) Read(p []byte) (n int, err error) { n = copy(p, b.buf[b.r:b.w]) b.r += n if b.closed && b.r == b.w { err = b.err } else if b.r == b.w && n == 0 { err = errReadEmpty } return n, err } // Len returns the number of bytes of the unread portion of the buffer. 
func (b *buffer) Len() int { return b.w - b.r } // Write copies bytes from p into the buffer. // It is an error to write more data than the buffer can hold. func (b *buffer) Write(p []byte) (n int, err error) { if b.closed { return 0, errWriteClosed } // Slide existing data to beginning. if b.r > 0 && len(p) > len(b.buf)-b.w { copy(b.buf, b.buf[b.r:b.w]) b.w -= b.r b.r = 0 } // Write new data. n = copy(b.buf[b.w:], p) b.w += n if n < len(p) { err = errWriteFull } return n, err } // Close marks the buffer as closed. Future calls to Write will // return an error. Future calls to Read, once the buffer is // empty, will return err. func (b *buffer) Close(err error) { if !b.closed { b.closed = true b.err = err } } ================================================ FILE: vendor/github.com/bradfitz/http2/errors.go ================================================ // Copyright 2014 The Go Authors. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package http2 import "fmt" // An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. type ErrCode uint32 const ( ErrCodeNo ErrCode = 0x0 ErrCodeProtocol ErrCode = 0x1 ErrCodeInternal ErrCode = 0x2 ErrCodeFlowControl ErrCode = 0x3 ErrCodeSettingsTimeout ErrCode = 0x4 ErrCodeStreamClosed ErrCode = 0x5 ErrCodeFrameSize ErrCode = 0x6 ErrCodeRefusedStream ErrCode = 0x7 ErrCodeCancel ErrCode = 0x8 ErrCodeCompression ErrCode = 0x9 ErrCodeConnect ErrCode = 0xa ErrCodeEnhanceYourCalm ErrCode = 0xb ErrCodeInadequateSecurity ErrCode = 0xc ErrCodeHTTP11Required ErrCode = 0xd ) var errCodeName = map[ErrCode]string{ ErrCodeNo: "NO_ERROR", ErrCodeProtocol: "PROTOCOL_ERROR", ErrCodeInternal: "INTERNAL_ERROR", ErrCodeFlowControl: "FLOW_CONTROL_ERROR", ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", ErrCodeStreamClosed: "STREAM_CLOSED", ErrCodeFrameSize: "FRAME_SIZE_ERROR", ErrCodeRefusedStream: "REFUSED_STREAM", ErrCodeCancel: "CANCEL", ErrCodeCompression: "COMPRESSION_ERROR", ErrCodeConnect: "CONNECT_ERROR", ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", } func (e ErrCode) String() string { if s, ok := errCodeName[e]; ok { return s } return fmt.Sprintf("unknown error code 0x%x", uint32(e)) } // ConnectionError is an error that results in the termination of the // entire connection. type ConnectionError ErrCode func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } // StreamError is an error that only affects one stream within an // HTTP/2 connection. type StreamError struct { StreamID uint32 Code ErrCode } func (e StreamError) Error() string { return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) } // 6.9.1 The Flow Control Window // "If a sender receives a WINDOW_UPDATE that causes a flow control // window to exceed this maximum it MUST terminate either the stream // or the connection, as appropriate. For streams, [...]; for the // connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." type goAwayFlowError struct{} func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } ================================================ FILE: vendor/github.com/bradfitz/http2/flow.go ================================================ // Copyright 2014 The Go Authors. 
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE // Flow control package http2 // flow is the flow control window's size. type flow struct { // n is the number of DATA bytes we're allowed to send. // A flow is kept both on a conn and a per-stream. n int32 // conn points to the shared connection-level flow that is // shared by all streams on that conn. It is nil for the flow // that's on the conn directly. conn *flow } func (f *flow) setConnFlow(cf *flow) { f.conn = cf } func (f *flow) available() int32 { n := f.n if f.conn != nil && f.conn.n < n { n = f.conn.n } return n } func (f *flow) take(n int32) { if n > f.available() { panic("internal error: took too much") } f.n -= n if f.conn != nil { f.conn.n -= n } } // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. func (f *flow) add(n int32) bool { remain := (1<<31 - 1) - f.n if n > remain { return false } f.n += n return true } ================================================ FILE: vendor/github.com/bradfitz/http2/frame.go ================================================ // Copyright 2014 The Go Authors. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package http2 import ( "bytes" "encoding/binary" "errors" "fmt" "io" "sync" ) const frameHeaderLen = 9 var padZeros = make([]byte, 255) // zeros for padding // A FrameType is a registered frame type as defined in // http://http2.github.io/http2-spec/#rfc.section.11.2 type FrameType uint8 const ( FrameData FrameType = 0x0 FrameHeaders FrameType = 0x1 FramePriority FrameType = 0x2 FrameRSTStream FrameType = 0x3 FrameSettings FrameType = 0x4 FramePushPromise FrameType = 0x5 FramePing FrameType = 0x6 FrameGoAway FrameType = 0x7 FrameWindowUpdate FrameType = 0x8 FrameContinuation FrameType = 0x9 ) var frameName = map[FrameType]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", FrameRSTStream: "RST_STREAM", FrameSettings: "SETTINGS", FramePushPromise: "PUSH_PROMISE", FramePing: "PING", FrameGoAway: "GOAWAY", FrameWindowUpdate: "WINDOW_UPDATE", FrameContinuation: "CONTINUATION", } func (t FrameType) String() string { if s, ok := frameName[t]; ok { return s } return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) } // Flags is a bitmask of HTTP/2 flags. // The meaning of flags varies depending on the frame type. type Flags uint8 // Has reports whether f contains all (0 or more) flags in v. func (f Flags) Has(v Flags) bool { return (f & v) == v } // Frame-specific FrameHeader flag bits. 
const ( // Data Frame FlagDataEndStream Flags = 0x1 FlagDataPadded Flags = 0x8 // Headers Frame FlagHeadersEndStream Flags = 0x1 FlagHeadersEndHeaders Flags = 0x4 FlagHeadersPadded Flags = 0x8 FlagHeadersPriority Flags = 0x20 // Settings Frame FlagSettingsAck Flags = 0x1 // Ping Frame FlagPingAck Flags = 0x1 // Continuation Frame FlagContinuationEndHeaders Flags = 0x4 FlagPushPromiseEndHeaders Flags = 0x4 FlagPushPromisePadded Flags = 0x8 ) var flagName = map[FrameType]map[Flags]string{ FrameData: { FlagDataEndStream: "END_STREAM", FlagDataPadded: "PADDED", }, FrameHeaders: { FlagHeadersEndStream: "END_STREAM", FlagHeadersEndHeaders: "END_HEADERS", FlagHeadersPadded: "PADDED", FlagHeadersPriority: "PRIORITY", }, FrameSettings: { FlagSettingsAck: "ACK", }, FramePing: { FlagPingAck: "ACK", }, FrameContinuation: { FlagContinuationEndHeaders: "END_HEADERS", }, FramePushPromise: { FlagPushPromiseEndHeaders: "END_HEADERS", FlagPushPromisePadded: "PADDED", }, } // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). type frameParser func(fh FrameHeader, payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, FrameRSTStream: parseRSTStreamFrame, FrameSettings: parseSettingsFrame, FramePushPromise: parsePushPromise, FramePing: parsePingFrame, FrameGoAway: parseGoAwayFrame, FrameWindowUpdate: parseWindowUpdateFrame, FrameContinuation: parseContinuationFrame, } func typeFrameParser(t FrameType) frameParser { if f := frameParsers[t]; f != nil { return f } return parseUnknownFrame } // A FrameHeader is the 9 byte header of all HTTP/2 frames. // // See http://http2.github.io/http2-spec/#FrameHeader type FrameHeader struct { valid bool // caller can access []byte fields in the Frame // Type is the 1 byte frame type. There are ten standard frame // types, but extension frame types may be written by WriteRawFrame // and will be returned by ReadFrame (as UnknownFrame). Type FrameType // Flags are the 1 byte of 8 potential bit flags per frame. // They are specific to the frame type. Flags Flags // Length is the length of the frame, not including the 9 byte header. // The maximum size is one byte less than 16MB (uint24), but only // frames up to 16KB are allowed without peer agreement. Length uint32 // StreamID is which stream this frame is for. Certain frames // are not stream-specific, in which case this field is 0. StreamID uint32 } // Header returns h. It exists so FrameHeaders can be embedded in other // specific frame types and implement the Frame interface. func (h FrameHeader) Header() FrameHeader { return h } func (h FrameHeader) String() string { var buf bytes.Buffer buf.WriteString("[FrameHeader ") buf.WriteString(h.Type.String()) if h.Flags != 0 { buf.WriteString(" flags=") set := 0 for i := uint8(0); i < 8; i++ { if h.Flags&(1< 1 { buf.WriteByte('|') } name := flagName[h.Type][Flags(1<>24), byte(streamID>>16), byte(streamID>>8), byte(streamID)) } func (f *Framer) endWrite() error { // Now that we know the final size, fill in the FrameHeader in // the space previously reserved for it. Abuse append. 
length := len(f.wbuf) - frameHeaderLen if length >= (1 << 24) { return ErrFrameTooLarge } _ = append(f.wbuf[:0], byte(length>>16), byte(length>>8), byte(length)) n, err := f.w.Write(f.wbuf) if err == nil && n != len(f.wbuf) { err = io.ErrShortWrite } return err } func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } func (f *Framer) writeUint32(v uint32) { f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) } const ( minMaxFrameSize = 1 << 14 maxFrameSize = 1<<24 - 1 ) // NewFramer returns a Framer that writes frames to w and reads them from r. func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ w: w, r: r, } fr.getReadBuf = func(size uint32) []byte { if cap(fr.readBuf) >= int(size) { return fr.readBuf[:size] } fr.readBuf = make([]byte, size) return fr.readBuf } fr.SetMaxReadFrameSize(maxFrameSize) return fr } // SetMaxReadFrameSize sets the maximum size of a frame // that will be read by a subsequent call to ReadFrame. // It is the caller's responsibility to advertise this // limit with a SETTINGS frame. func (fr *Framer) SetMaxReadFrameSize(v uint32) { if v > maxFrameSize { v = maxFrameSize } fr.maxReadSize = v } // ErrFrameTooLarge is returned from Framer.ReadFrame when the peer // sends a frame that is larger than declared with SetMaxReadFrameSize. var ErrFrameTooLarge = errors.New("http2: frame too large") // ReadFrame reads a single frame. The returned Frame is only valid // until the next call to ReadFrame. // If the frame is larger than previously set with SetMaxReadFrameSize, // the returned error is ErrFrameTooLarge. func (fr *Framer) ReadFrame() (Frame, error) { if fr.lastFrame != nil { fr.lastFrame.invalidate() } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { return nil, err } if fh.Length > fr.maxReadSize { return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } f, err := typeFrameParser(fh.Type)(fh, payload) if err != nil { return nil, err } fr.lastFrame = f return f, nil } // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.1 type DataFrame struct { FrameHeader data []byte } func (f *DataFrame) StreamEnded() bool { return f.FrameHeader.Flags.Has(FlagDataEndStream) } // Data returns the frame's data octets, not including any padding // size byte or padding suffix bytes. // The caller must not retain the returned memory past the next // call to ReadFrame. func (f *DataFrame) Data() []byte { f.checkValid() return f.data } func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier // field is 0x0, the recipient MUST respond with a // connection error (Section 5.4.1) of type // PROTOCOL_ERROR. return nil, ConnectionError(ErrCodeProtocol) } f := &DataFrame{ FrameHeader: fh, } var padSize byte if fh.Flags.Has(FlagDataPadded) { var err error payload, padSize, err = readByte(payload) if err != nil { return nil, err } } if int(padSize) > len(payload) { // If the length of the padding is greater than the // length of the frame payload, the recipient MUST // treat this as a connection error. 
// Filed: https://github.com/http2/http2-spec/issues/610 return nil, ConnectionError(ErrCodeProtocol) } f.data = payload[:len(payload)-int(padSize)] return f, nil } var errStreamID = errors.New("invalid streamid") func validStreamID(streamID uint32) bool { return streamID != 0 && streamID&(1<<31) == 0 } // WriteData writes a DATA frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { // TODO: ignoring padding for now. will add when somebody cares. if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } var flags Flags if endStream { flags |= FlagDataEndStream } f.startWrite(FrameData, flags, streamID) f.wbuf = append(f.wbuf, data...) return f.endWrite() } // A SettingsFrame conveys configuration parameters that affect how // endpoints communicate, such as preferences and constraints on peer // behavior. // // See http://http2.github.io/http2-spec/#SETTINGS type SettingsFrame struct { FrameHeader p []byte } func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the // SETTINGS frame MUST be empty. Receipt of a // SETTINGS frame with the ACK flag set and a length // field value other than 0 MUST be treated as a // connection error (Section 5.4.1) of type // FRAME_SIZE_ERROR. return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { // SETTINGS frames always apply to a connection, // never a single stream. The stream identifier for a // SETTINGS frame MUST be zero (0x0). If an endpoint // receives a SETTINGS frame whose stream identifier // field is anything other than 0x0, the endpoint MUST // respond with a connection error (Section 5.4.1) of // type PROTOCOL_ERROR. return nil, ConnectionError(ErrCodeProtocol) } if len(p)%6 != 0 { // Expecting even number of 6 byte settings. return nil, ConnectionError(ErrCodeFrameSize) } f := &SettingsFrame{FrameHeader: fh, p: p} if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { // Values above the maximum flow control window size of 2^31 - 1 MUST // be treated as a connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR. return nil, ConnectionError(ErrCodeFlowControl) } return f, nil } func (f *SettingsFrame) IsAck() bool { return f.FrameHeader.Flags.Has(FlagSettingsAck) } func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { f.checkValid() buf := f.p for len(buf) > 0 { settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) if settingID == s { return binary.BigEndian.Uint32(buf[2:6]), true } buf = buf[6:] } return 0, false } // ForeachSetting runs fn for each setting. // It stops and returns the first error. func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { f.checkValid() buf := f.p for len(buf) > 0 { if err := fn(Setting{ SettingID(binary.BigEndian.Uint16(buf[:2])), binary.BigEndian.Uint32(buf[2:6]), }); err != nil { return err } buf = buf[6:] } return nil } // WriteSettings writes a SETTINGS frame with zero or more settings // specified and the ACK bit not set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. 
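//
// A rough usage sketch, writing into an in-memory bytes.Buffer instead of a
// real connection (SettingInitialWindowSize is the setting ID defined by this
// package); error handling elided:
//
//	var buf bytes.Buffer
//	fr := NewFramer(&buf, &buf)
//	_ = fr.WriteSettings(Setting{SettingInitialWindowSize, 1 << 20})
//	_ = fr.WriteSettingsAck()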
func (f *Framer) WriteSettings(settings ...Setting) error { f.startWrite(FrameSettings, 0, 0) for _, s := range settings { f.writeUint16(uint16(s.ID)) f.writeUint32(s.Val) } return f.endWrite() } // WriteSettings writes an empty SETTINGS frame with the ACK bit set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteSettingsAck() error { f.startWrite(FrameSettings, FlagSettingsAck, 0) return f.endWrite() } // A PingFrame is a mechanism for measuring a minimal round trip time // from the sender, as well as determining whether an idle connection // is still functional. // See http://http2.github.io/http2-spec/#rfc.section.6.7 type PingFrame struct { FrameHeader Data [8]byte } func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } f := &PingFrame{FrameHeader: fh} copy(f.Data[:], payload) return f, nil } func (f *Framer) WritePing(ack bool, data [8]byte) error { var flags Flags if ack { flags = FlagPingAck } f.startWrite(FramePing, flags, 0) f.writeBytes(data[:]) return f.endWrite() } // A GoAwayFrame informs the remote peer to stop creating streams on this connection. // See http://http2.github.io/http2-spec/#rfc.section.6.8 type GoAwayFrame struct { FrameHeader LastStreamID uint32 ErrCode ErrCode debugData []byte } // DebugData returns any debug data in the GOAWAY frame. Its contents // are not defined. // The caller must not retain the returned memory past the next // call to ReadFrame. func (f *GoAwayFrame) DebugData() []byte { f.checkValid() return f.debugData } func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } if len(p) < 8 { return nil, ConnectionError(ErrCodeFrameSize) } return &GoAwayFrame{ FrameHeader: fh, LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), debugData: p[8:], }, nil } func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { f.startWrite(FrameGoAway, 0, 0) f.writeUint32(maxStreamID & (1<<31 - 1)) f.writeUint32(uint32(code)) f.writeBytes(debugData) return f.endWrite() } // An UnknownFrame is the frame type returned when the frame type is unknown // or no specific frame type parser exists. type UnknownFrame struct { FrameHeader p []byte } // Payload returns the frame's payload (after the header). It is not // valid to call this method after a subsequent call to // Framer.ReadFrame, nor is it valid to retain the returned slice. // The memory is owned by the Framer and is invalidated when the next // frame is read. func (f *UnknownFrame) Payload() []byte { f.checkValid() return f.p } func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } // A WindowUpdateFrame is used to implement flow control. 
// See http://http2.github.io/http2-spec/#rfc.section.6.9 type WindowUpdateFrame struct { FrameHeader Increment uint32 } func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit if inc == 0 { // A receiver MUST treat the receipt of a // WINDOW_UPDATE frame with an flow control window // increment of 0 as a stream error (Section 5.4.2) of // type PROTOCOL_ERROR; errors on the connection flow // control window MUST be treated as a connection // error (Section 5.4.1). if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } return nil, StreamError{fh.StreamID, ErrCodeProtocol} } return &WindowUpdateFrame{ FrameHeader: fh, Increment: inc, }, nil } // WriteWindowUpdate writes a WINDOW_UPDATE frame. // The increment value must be between 1 and 2,147,483,647, inclusive. // If the Stream ID is zero, the window update applies to the // connection as a whole. func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { return errors.New("illegal window increment value") } f.startWrite(FrameWindowUpdate, 0, streamID) f.writeUint32(incr) return f.endWrite() } // A HeadersFrame is used to open a stream and additionally carries a // header block fragment. type HeadersFrame struct { FrameHeader // Priority is set if FlagHeadersPriority is set in the FrameHeader. Priority PriorityParam headerFragBuf []byte // not owned } func (f *HeadersFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *HeadersFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) } func (f *HeadersFrame) StreamEnded() bool { return f.FrameHeader.Flags.Has(FlagHeadersEndStream) } func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } if fh.StreamID == 0 { // HEADERS frames MUST be associated with a stream. If a HEADERS frame // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. return nil, ConnectionError(ErrCodeProtocol) } var padLength uint8 if fh.Flags.Has(FlagHeadersPadded) { if p, padLength, err = readByte(p); err != nil { return } } if fh.Flags.Has(FlagHeadersPriority) { var v uint32 p, v, err = readUint32(p) if err != nil { return nil, err } hf.Priority.StreamDep = v & 0x7fffffff hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set p, hf.Priority.Weight, err = readByte(p) if err != nil { return nil, err } } if len(p)-int(padLength) <= 0 { return nil, StreamError{fh.StreamID, ErrCodeProtocol} } hf.headerFragBuf = p[:len(p)-int(padLength)] return hf, nil } // HeadersFrameParam are the parameters for writing a HEADERS frame. type HeadersFrameParam struct { // StreamID is the required Stream ID to initiate. StreamID uint32 // BlockFragment is part (or all) of a Header Block. BlockFragment []byte // EndStream indicates that the header block is the last that // the endpoint will send for the identified stream. Setting // this flag causes the stream to enter one of "half closed" // states. 
EndStream bool // EndHeaders indicates that this frame contains an entire // header block and is not followed by any // CONTINUATION frames. EndHeaders bool // PadLength is the optional number of bytes of zeros to add // to this frame. PadLength uint8 // Priority, if non-zero, includes stream priority information // in the HEADER frame. Priority PriorityParam } // WriteHeaders writes a single HEADERS frame. // // This is a low-level header writing method. Encoding headers and // splitting them into any necessary CONTINUATION frames is handled // elsewhere. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteHeaders(p HeadersFrameParam) error { if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { return errStreamID } var flags Flags if p.PadLength != 0 { flags |= FlagHeadersPadded } if p.EndStream { flags |= FlagHeadersEndStream } if p.EndHeaders { flags |= FlagHeadersEndHeaders } if !p.Priority.IsZero() { flags |= FlagHeadersPriority } f.startWrite(FrameHeaders, flags, p.StreamID) if p.PadLength != 0 { f.writeByte(p.PadLength) } if !p.Priority.IsZero() { v := p.Priority.StreamDep if !validStreamID(v) && !f.AllowIllegalWrites { return errors.New("invalid dependent stream id") } if p.Priority.Exclusive { v |= 1 << 31 } f.writeUint32(v) f.writeByte(p.Priority.Weight) } f.wbuf = append(f.wbuf, p.BlockFragment...) f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) return f.endWrite() } // A PriorityFrame specifies the sender-advised priority of a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.3 type PriorityFrame struct { FrameHeader PriorityParam } // PriorityParam are the stream prioritzation parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no // dependency. StreamDep uint32 // Exclusive is whether the dependency is exclusive. Exclusive bool // Weight is the stream's zero-indexed weight. It should be // set together with StreamDep, or neither should be set. Per // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 } func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } if len(payload) != 5 { return nil, ConnectionError(ErrCodeFrameSize) } v := binary.BigEndian.Uint32(payload[:4]) streamID := v & 0x7fffffff // mask off high bit return &PriorityFrame{ FrameHeader: fh, PriorityParam: PriorityParam{ Weight: payload[4], StreamDep: streamID, Exclusive: streamID != v, // was high bit set? }, }, nil } // WritePriority writes a PRIORITY frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } f.startWrite(FramePriority, 0, streamID) v := p.StreamDep if p.Exclusive { v |= 1 << 31 } f.writeUint32(v) f.writeByte(p.Weight) return f.endWrite() } // A RSTStreamFrame allows for abnormal termination of a stream. 
// See http://http2.github.io/http2-spec/#rfc.section.6.4 type RSTStreamFrame struct { FrameHeader ErrCode ErrCode } func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil } // WriteRSTStream writes a RST_STREAM frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } f.startWrite(FrameRSTStream, 0, streamID) f.writeUint32(uint32(code)) return f.endWrite() } // A ContinuationFrame is used to continue a sequence of header block fragments. // See http://http2.github.io/http2-spec/#rfc.section.6.10 type ContinuationFrame struct { FrameHeader headerFragBuf []byte } func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { return &ContinuationFrame{fh, p}, nil } func (f *ContinuationFrame) StreamEnded() bool { return f.FrameHeader.Flags.Has(FlagDataEndStream) } func (f *ContinuationFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *ContinuationFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) } // WriteContinuation writes a CONTINUATION frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } var flags Flags if endHeaders { flags |= FlagContinuationEndHeaders } f.startWrite(FrameContinuation, flags, streamID) f.wbuf = append(f.wbuf, headerBlockFragment...) return f.endWrite() } // A PushPromiseFrame is used to initiate a server stream. // See http://http2.github.io/http2-spec/#rfc.section.6.6 type PushPromiseFrame struct { FrameHeader PromiseID uint32 headerFragBuf []byte // not owned } func (f *PushPromiseFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } if pp.StreamID == 0 { // PUSH_PROMISE frames MUST be associated with an existing, // peer-initiated stream. The stream identifier of a // PUSH_PROMISE frame indicates the stream it is associated // with. If the stream identifier field specifies the value // 0x0, a recipient MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. return nil, ConnectionError(ErrCodeProtocol) } // The PUSH_PROMISE frame includes optional padding. // Padding fields and flags are identical to those defined for DATA frames var padLength uint8 if fh.Flags.Has(FlagPushPromisePadded) { if p, padLength, err = readByte(p); err != nil { return } } p, pp.PromiseID, err = readUint32(p) if err != nil { return } pp.PromiseID = pp.PromiseID & (1<<31 - 1) if int(padLength) > len(p) { // like the DATA frame, error out if padding is longer than the body. 
return nil, ConnectionError(ErrCodeProtocol) } pp.headerFragBuf = p[:len(p)-int(padLength)] return pp, nil } // PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. type PushPromiseParam struct { // StreamID is the required Stream ID to initiate. StreamID uint32 // PromiseID is the required Stream ID which this // Push Promises PromiseID uint32 // BlockFragment is part (or all) of a Header Block. BlockFragment []byte // EndHeaders indicates that this frame contains an entire // header block and is not followed by any // CONTINUATION frames. EndHeaders bool // PadLength is the optional number of bytes of zeros to add // to this frame. PadLength uint8 } // WritePushPromise writes a single PushPromise Frame. // // As with Header Frames, This is the low level call for writing // individual frames. Continuation frames are handled elsewhere. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WritePushPromise(p PushPromiseParam) error { if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { return errStreamID } var flags Flags if p.PadLength != 0 { flags |= FlagPushPromisePadded } if p.EndHeaders { flags |= FlagPushPromiseEndHeaders } f.startWrite(FramePushPromise, flags, p.StreamID) if p.PadLength != 0 { f.writeByte(p.PadLength) } if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { return errStreamID } f.writeUint32(p.PromiseID) f.wbuf = append(f.wbuf, p.BlockFragment...) f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) return f.endWrite() } // WriteRawFrame writes a raw frame. This can be used to write // extension frames unknown to this package. func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { f.startWrite(t, flags, streamID) f.writeBytes(payload) return f.endWrite() } func readByte(p []byte) (remain []byte, b byte, err error) { if len(p) == 0 { return nil, 0, io.ErrUnexpectedEOF } return p[1:], p[0], nil } func readUint32(p []byte) (remain []byte, v uint32, err error) { if len(p) < 4 { return nil, 0, io.ErrUnexpectedEOF } return p[4:], binary.BigEndian.Uint32(p[:4]), nil } type streamEnder interface { StreamEnded() bool } type headersEnder interface { HeadersEnded() bool } ================================================ FILE: vendor/github.com/bradfitz/http2/gotrack.go ================================================ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE // Defensive debug-only utility to track that functions run on the // goroutine that they're supposed to. 
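// Illustration, not part of the upstream file: the intended usage pattern for the
// goroutineLock defined below. server.go later stores one in serverConn.serveG so
// helpers can assert which goroutine they run on; the conn type and method names
// here are hypothetical. All checks are no-ops unless DEBUG_HTTP2_GOROUTINES=1 is
// set in the environment.
//
//	type conn struct {
//		serveG goroutineLock // owned by the serve loop's goroutine
//	}
//
//	func (c *conn) serve() {
//		c.serveG = newGoroutineLock() // record the serve goroutine's ID
//		// ... event loop ...
//	}
//
//	func (c *conn) onServeLoop()   { c.serveG.check() }      // must run on the serve goroutine
//	func (c *conn) fromElsewhere() { c.serveG.checkNotOn() } // must NOT run on it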
package http2 import ( "bytes" "errors" "fmt" "os" "runtime" "strconv" "sync" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" type goroutineLock uint64 func newGoroutineLock() goroutineLock { if !DebugGoroutines { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { if !DebugGoroutines { return } if curGoroutineID() != uint64(g) { panic("running on the wrong goroutine") } } func (g goroutineLock) checkNotOn() { if !DebugGoroutines { return } if curGoroutineID() == uint64(g) { panic("running on the wrong goroutine") } } var goroutineSpace = []byte("goroutine ") func curGoroutineID() uint64 { bp := littleBuf.Get().(*[]byte) defer littleBuf.Put(bp) b := *bp b = b[:runtime.Stack(b, false)] // Parse the 4707 out of "goroutine 4707 [" b = bytes.TrimPrefix(b, goroutineSpace) i := bytes.IndexByte(b, ' ') if i < 0 { panic(fmt.Sprintf("No space found in %q", b)) } b = b[:i] n, err := parseUintBytes(b, 10, 64) if err != nil { panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) } return n } var littleBuf = sync.Pool{ New: func() interface{} { buf := make([]byte, 64) return &buf }, } // parseUintBytes is like strconv.ParseUint, but using a []byte. func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { var cutoff, maxVal uint64 if bitSize == 0 { bitSize = int(strconv.IntSize) } s0 := s switch { case len(s) < 1: err = strconv.ErrSyntax goto Error case 2 <= base && base <= 36: // valid base; nothing to do case base == 0: // Look for octal, hex prefix. switch { case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): base = 16 s = s[2:] if len(s) < 1 { err = strconv.ErrSyntax goto Error } case s[0] == '0': base = 8 default: base = 10 } default: err = errors.New("invalid base " + strconv.Itoa(base)) goto Error } n = 0 cutoff = cutoff64(base) maxVal = 1<= base { n = 0 err = strconv.ErrSyntax goto Error } if n >= cutoff { // n*base overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n *= uint64(base) n1 := n + uint64(v) if n1 < n || n1 > maxVal { // n+v overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n = n1 } return n, nil Error: return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} } // Return the first number n such that n*base >= 1<<64. func cutoff64(base int) uint64 { if base < 2 { return 0 } return (1<<64-1)/uint64(base) + 1 } ================================================ FILE: vendor/github.com/bradfitz/http2/h2i/README.md ================================================ # h2i **h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' days of telnetting to your HTTP/1.n servers? We're bringing you back. Features: - send raw HTTP/2 frames - PING - SETTINGS - HEADERS - etc - type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 - pretty print all received HTTP/2 frames from the peer (including HPACK decoding) - tab completion of commands, options Not yet features, but soon: - unnecessary CONTINUATION frames on short boundaries, to test peer implementations - request bodies (DATA frames) - send invalid frames for testing server implementations (supported by underlying Framer) Later: - act like a server ## Installation ``` $ go get github.com/bradfitz/http2/h2i $ h2i ``` ## Demo ``` $ h2i Usage: h2i -insecure Whether to skip TLS cert validation -nextproto string Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") $ h2i google.com Connecting to google.com:443 ... 
Connected to 74.125.224.41:443 Negotiated protocol "h2-14" [FrameHeader SETTINGS len=18] [MAX_CONCURRENT_STREAMS = 100] [INITIAL_WINDOW_SIZE = 1048576] [MAX_FRAME_SIZE = 16384] [FrameHeader WINDOW_UPDATE len=4] Window-Increment = 983041 h2i> PING h2iSayHI [FrameHeader PING flags=ACK len=8] Data = "h2iSayHI" h2i> headers (as HTTP/1.1)> GET / HTTP/1.1 (as HTTP/1.1)> Host: ip.appspot.com (as HTTP/1.1)> User-Agent: h2i/brad-n-blake (as HTTP/1.1)> Opening Stream-ID 1: :authority = ip.appspot.com :method = GET :path = / :scheme = https user-agent = h2i/brad-n-blake [FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] :status = "200" alternate-protocol = "443:quic,p=1" content-length = "15" content-type = "text/html" date = "Fri, 01 May 2015 23:06:56 GMT" server = "Google Frontend" [FrameHeader DATA flags=END_STREAM stream=1 len=15] "173.164.155.78\n" [FrameHeader PING len=8] Data = "\x00\x00\x00\x00\x00\x00\x00\x00" h2i> ping [FrameHeader PING flags=ACK len=8] Data = "h2i_ping" h2i> ping [FrameHeader PING flags=ACK len=8] Data = "h2i_ping" h2i> ping [FrameHeader GOAWAY len=22] Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) ReadFrame: EOF ``` ## Status Quick few hour hack. So much yet to do. Feel free to file issues for bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) and I aren't yet accepting pull requests until things settle down. ================================================ FILE: vendor/github.com/bradfitz/http2/h2i/h2i.go ================================================ // Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE /* The h2i command is an interactive HTTP/2 console. Usage: $ h2i [flags] Interactive commands in the console: (all parts case-insensitive) ping [data] settings ack settings FOO=n BAR=z headers (open a new stream by typing HTTP/1.1) */ package main import ( "bufio" "bytes" "crypto/tls" "errors" "flag" "fmt" "io" "log" "net" "net/http" "os" "regexp" "strconv" "strings" "github.com/bradfitz/http2" "github.com/bradfitz/http2/hpack" "golang.org/x/crypto/ssh/terminal" ) // Flags var ( flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") ) type command struct { run func(*h2i, []string) error // required // complete optionally specifies tokens (case-insensitive) which are // valid for this subcommand. complete func() []string } var commands = map[string]command{ "ping": command{run: (*h2i).cmdPing}, "settings": command{ run: (*h2i).cmdSettings, complete: func() []string { return []string{ "ACK", http2.SettingHeaderTableSize.String(), http2.SettingEnablePush.String(), http2.SettingMaxConcurrentStreams.String(), http2.SettingInitialWindowSize.String(), http2.SettingMaxFrameSize.String(), http2.SettingMaxHeaderListSize.String(), } }, }, "quit": command{run: (*h2i).cmdQuit}, "headers": command{run: (*h2i).cmdHeaders}, } func usage() { fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") flag.PrintDefaults() os.Exit(1) } // withPort adds ":443" if another port isn't already present. func withPort(host string) string { if _, _, err := net.SplitHostPort(host); err != nil { return net.JoinHostPort(host, "443") } return host } // h2i is the app's state. 
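// Illustration, not part of the upstream file: withPort above only appends the
// default HTTPS port when the argument doesn't already carry one, e.g.
//
//	withPort("google.com")      // "google.com:443"
//	withPort("google.com:8443") // unchanged: "google.com:8443"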
type h2i struct { host string tc *tls.Conn framer *http2.Framer term *terminal.Terminal // owned by the command loop: streamID uint32 hbuf bytes.Buffer henc *hpack.Encoder // owned by the readFrames loop: peerSetting map[http2.SettingID]uint32 hdec *hpack.Decoder } func main() { flag.Usage = usage flag.Parse() if flag.NArg() != 1 { usage() } log.SetFlags(0) host := flag.Arg(0) app := &h2i{ host: host, peerSetting: make(map[http2.SettingID]uint32), } app.henc = hpack.NewEncoder(&app.hbuf) if err := app.Main(); err != nil { if app.term != nil { app.logf("%v\n", err) } else { fmt.Fprintf(os.Stderr, "%v\n", err) } os.Exit(1) } fmt.Fprintf(os.Stdout, "\n") } func (app *h2i) Main() error { cfg := &tls.Config{ ServerName: app.host, NextProtos: strings.Split(*flagNextProto, ","), InsecureSkipVerify: *flagInsecure, } hostAndPort := withPort(app.host) log.Printf("Connecting to %s ...", hostAndPort) tc, err := tls.Dial("tcp", hostAndPort, cfg) if err != nil { return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err) } log.Printf("Connected to %v", tc.RemoteAddr()) defer tc.Close() if err := tc.Handshake(); err != nil { return fmt.Errorf("TLS handshake: %v", err) } if !*flagInsecure { if err := tc.VerifyHostname(app.host); err != nil { return fmt.Errorf("VerifyHostname: %v", err) } } state := tc.ConnectionState() log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { return fmt.Errorf("Could not negotiate protocol mutually") } if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { return err } app.framer = http2.NewFramer(tc, tc) oldState, err := terminal.MakeRaw(0) if err != nil { return err } defer terminal.Restore(0, oldState) var screen = struct { io.Reader io.Writer }{os.Stdin, os.Stdout} app.term = terminal.NewTerminal(screen, "h2i> ") lastWord := regexp.MustCompile(`.+\W(\w+)$`) app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { if key != '\t' { return } if pos != len(line) { // TODO: we're being lazy for now, only supporting tab completion at the end. return } // Auto-complete for the command itself. if !strings.Contains(line, " ") { var name string name, _, ok = lookupCommand(line) if !ok { return } return name, len(name), true } _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) if !ok || c.complete == nil { return } if strings.HasSuffix(line, " ") { app.logf("%s", strings.Join(c.complete(), " ")) return line, pos, true } m := lastWord.FindStringSubmatch(line) if m == nil { return line, len(line), true } soFar := m[1] var match []string for _, cand := range c.complete() { if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { continue } match = append(match, cand) } if len(match) == 0 { return } if len(match) > 1 { // TODO: auto-complete any common prefix app.logf("%s", strings.Join(match, " ")) return line, pos, true } newLine = line[:len(line)-len(soFar)] + match[0] return newLine, len(newLine), true } errc := make(chan error, 2) go func() { errc <- app.readFrames() }() go func() { errc <- app.readConsole() }() return <-errc } func (app *h2i) logf(format string, args ...interface{}) { fmt.Fprintf(app.term, format+"\n", args...) 
} func (app *h2i) readConsole() error { for { line, err := app.term.ReadLine() if err == io.EOF { return nil } if err != nil { return fmt.Errorf("terminal.ReadLine: %v", err) } f := strings.Fields(line) if len(f) == 0 { continue } cmd, args := f[0], f[1:] if _, c, ok := lookupCommand(cmd); ok { err = c.run(app, args) } else { app.logf("Unknown command %q", line) } if err == errExitApp { return nil } if err != nil { return err } } } func lookupCommand(prefix string) (name string, c command, ok bool) { prefix = strings.ToLower(prefix) if c, ok = commands[prefix]; ok { return prefix, c, ok } for full, candidate := range commands { if strings.HasPrefix(full, prefix) { if c.run != nil { return "", command{}, false // ambiguous } c = candidate name = full } } return name, c, c.run != nil } var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") func (a *h2i) cmdQuit(args []string) error { if len(args) > 0 { a.logf("the QUIT command takes no argument") return nil } return errExitApp } func (a *h2i) cmdSettings(args []string) error { if len(args) == 1 && strings.EqualFold(args[0], "ACK") { return a.framer.WriteSettingsAck() } var settings []http2.Setting for _, arg := range args { if strings.EqualFold(arg, "ACK") { a.logf("Error: ACK must be only argument with the SETTINGS command") return nil } eq := strings.Index(arg, "=") if eq == -1 { a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) return nil } sid, ok := settingByName(arg[:eq]) if !ok { a.logf("Error: unknown setting name %q", arg[:eq]) return nil } val, err := strconv.ParseUint(arg[eq+1:], 10, 32) if err != nil { a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) return nil } settings = append(settings, http2.Setting{ ID: sid, Val: uint32(val), }) } a.logf("Sending: %v", settings) return a.framer.WriteSettings(settings...) } func settingByName(name string) (http2.SettingID, bool) { for _, sid := range [...]http2.SettingID{ http2.SettingHeaderTableSize, http2.SettingEnablePush, http2.SettingMaxConcurrentStreams, http2.SettingInitialWindowSize, http2.SettingMaxFrameSize, http2.SettingMaxHeaderListSize, } { if strings.EqualFold(sid.String(), name) { return sid, true } } return 0, false } func (app *h2i) cmdPing(args []string) error { if len(args) > 1 { app.logf("invalid PING usage: only accepts 0 or 1 args") return nil // nil means don't end the program } var data [8]byte if len(args) == 1 { copy(data[:], args[0]) } else { copy(data[:], "h2i_ping") } return app.framer.WritePing(false, data) } func (app *h2i) cmdHeaders(args []string) error { if len(args) > 0 { app.logf("Error: HEADERS doesn't yet take arguments.") // TODO: flags for restricting window size, to force CONTINUATION // frames. return nil } var h1req bytes.Buffer app.term.SetPrompt("(as HTTP/1.1)> ") defer app.term.SetPrompt("h2i> ") for { line, err := app.term.ReadLine() if err != nil { return err } h1req.WriteString(line) h1req.WriteString("\r\n") if line == "" { break } } req, err := http.ReadRequest(bufio.NewReader(&h1req)) if err != nil { app.logf("Invalid HTTP/1.1 request: %v", err) return nil } if app.streamID == 0 { app.streamID = 1 } else { app.streamID += 2 } app.logf("Opening Stream-ID %d:", app.streamID) hbf := app.encodeHeaders(req) if len(hbf) > 16<<10 { app.logf("TODO: h2i doesn't yet write CONTINUATION frames. 
Copy it from transport.go") return nil } return app.framer.WriteHeaders(http2.HeadersFrameParam{ StreamID: app.streamID, BlockFragment: hbf, EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now EndHeaders: true, // for now }) } func (app *h2i) readFrames() error { for { f, err := app.framer.ReadFrame() if err != nil { return fmt.Errorf("ReadFrame: %v", err) } app.logf("%v", f) switch f := f.(type) { case *http2.PingFrame: app.logf(" Data = %q", f.Data) case *http2.SettingsFrame: f.ForeachSetting(func(s http2.Setting) error { app.logf(" %v", s) app.peerSetting[s.ID] = s.Val return nil }) case *http2.WindowUpdateFrame: app.logf(" Window-Increment = %v\n", f.Increment) case *http2.GoAwayFrame: app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode) case *http2.DataFrame: app.logf(" %q", f.Data()) case *http2.HeadersFrame: if f.HasPriority() { app.logf(" PRIORITY = %v", f.Priority) } if app.hdec == nil { // TODO: if the user uses h2i to send a SETTINGS frame advertising // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE // and stuff here instead of using the 4k default. But for now: tableSize := uint32(4 << 10) app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) } app.hdec.Write(f.HeaderBlockFragment()) } } } // called from readLoop func (app *h2i) onNewHeaderField(f hpack.HeaderField) { if f.Sensitive { app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) } app.logf(" %s = %q", f.Name, f.Value) } func (app *h2i) encodeHeaders(req *http.Request) []byte { app.hbuf.Reset() // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go host := req.Host if host == "" { host = req.URL.Host } path := req.URL.Path if path == "" { path = "/" } app.writeHeader(":authority", host) // probably not right for all sites app.writeHeader(":method", req.Method) app.writeHeader(":path", path) app.writeHeader(":scheme", "https") for k, vv := range req.Header { lowKey := strings.ToLower(k) if lowKey == "host" { continue } for _, v := range vv { app.writeHeader(lowKey, v) } } return app.hbuf.Bytes() } func (app *h2i) writeHeader(name, value string) { app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) app.logf(" %s = %s", name, value) } ================================================ FILE: vendor/github.com/bradfitz/http2/headermap.go ================================================ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package http2 import ( "net/http" "strings" ) var ( commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case ) func init() { for _, v := range []string{ "accept", "accept-charset", "accept-encoding", "accept-language", "accept-ranges", "age", "access-control-allow-origin", "allow", "authorization", "cache-control", "content-disposition", "content-encoding", "content-language", "content-length", "content-location", "content-range", "content-type", "cookie", "date", "etag", "expect", "expires", "from", "host", "if-match", "if-modified-since", "if-none-match", "if-unmodified-since", "last-modified", "link", "location", "max-forwards", "proxy-authenticate", "proxy-authorization", "range", "referer", "refresh", "retry-after", "server", "set-cookie", "strict-transport-security", "transfer-encoding", "user-agent", "vary", "via", "www-authenticate", } { chk := http.CanonicalHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } func lowerHeader(v string) string { if s, ok := commonLowerHeader[v]; ok { return s } return strings.ToLower(v) } ================================================ FILE: vendor/github.com/bradfitz/http2/hpack/encode.go ================================================ // Copyright 2014 The Go Authors. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package hpack import ( "io" ) const ( uint32Max = ^uint32(0) initialHeaderTableSize = 4096 ) type Encoder struct { dynTab dynamicTable // minSize is the minimum table size set by // SetMaxDynamicTableSize after the previous Header Table Size // Update. minSize uint32 // maxSizeLimit is the maximum table size this encoder // supports. This will protect the encoder from too large // size. maxSizeLimit uint32 // tableSizeUpdate indicates whether "Header Table Size // Update" is required. tableSizeUpdate bool w io.Writer buf []byte } // NewEncoder returns a new Encoder which performs HPACK encoding. An // encoded data is written to w. func NewEncoder(w io.Writer) *Encoder { e := &Encoder{ minSize: uint32Max, maxSizeLimit: initialHeaderTableSize, tableSizeUpdate: false, w: w, } e.dynTab.setMaxSize(initialHeaderTableSize) return e } // WriteField encodes f into a single Write to e's underlying Writer. // This function may also produce bytes for "Header Table Size Update" // if necessary. If produced, it is done before encoding f. func (e *Encoder) WriteField(f HeaderField) error { e.buf = e.buf[:0] if e.tableSizeUpdate { e.tableSizeUpdate = false if e.minSize < e.dynTab.maxSize { e.buf = appendTableSize(e.buf, e.minSize) } e.minSize = uint32Max e.buf = appendTableSize(e.buf, e.dynTab.maxSize) } idx, nameValueMatch := e.searchTable(f) if nameValueMatch { e.buf = appendIndexed(e.buf, idx) } else { indexing := e.shouldIndex(f) if indexing { e.dynTab.add(f) } if idx == 0 { e.buf = appendNewName(e.buf, f, indexing) } else { e.buf = appendIndexedName(e.buf, f, idx, indexing) } } n, err := e.w.Write(e.buf) if err == nil && n != len(e.buf) { err = io.ErrShortWrite } return err } // searchTable searches f in both stable and dynamic header tables. // The static header table is searched first. 
Only when there is no // exact match for both name and value, the dynamic header table is // then searched. If there is no match, i is 0. If both name and value // match, i is the matched index and nameValueMatch becomes true. If // only name matches, i points to that index and nameValueMatch // becomes false. func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { for idx, hf := range staticTable { if !constantTimeStringCompare(hf.Name, f.Name) { continue } if i == 0 { i = uint64(idx + 1) } if f.Sensitive { continue } if !constantTimeStringCompare(hf.Value, f.Value) { continue } i = uint64(idx + 1) nameValueMatch = true return } j, nameValueMatch := e.dynTab.search(f) if nameValueMatch || (i == 0 && j != 0) { i = j + uint64(len(staticTable)) } return } // SetMaxDynamicTableSize changes the dynamic header table size to v. // The actual size is bounded by the value passed to // SetMaxDynamicTableSizeLimit. func (e *Encoder) SetMaxDynamicTableSize(v uint32) { if v > e.maxSizeLimit { v = e.maxSizeLimit } if v < e.minSize { e.minSize = v } e.tableSizeUpdate = true e.dynTab.setMaxSize(v) } // SetMaxDynamicTableSizeLimit changes the maximum value that can be // specified in SetMaxDynamicTableSize to v. By default, it is set to // 4096, which is the same size of the default dynamic header table // size described in HPACK specification. If the current maximum // dynamic header table size is strictly greater than v, "Header Table // Size Update" will be done in the next WriteField call and the // maximum dynamic header table size is truncated to v. func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { e.maxSizeLimit = v if e.dynTab.maxSize > v { e.tableSizeUpdate = true e.dynTab.setMaxSize(v) } } // shouldIndex reports whether f should be indexed. func (e *Encoder) shouldIndex(f HeaderField) bool { return !f.Sensitive && f.size() <= e.dynTab.maxSize } // appendIndexed appends index i, as encoded in "Indexed Header Field" // representation, to dst and returns the extended buffer. func appendIndexed(dst []byte, i uint64) []byte { first := len(dst) dst = appendVarInt(dst, 7, i) dst[first] |= 0x80 return dst } // appendNewName appends f, as encoded in one of "Literal Header field // - New Name" representation variants, to dst and returns the // extended buffer. // // If f.Sensitive is true, "Never Indexed" representation is used. If // f.Sensitive is false and indexing is true, "Inremental Indexing" // representation is used. func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) dst = appendHpackString(dst, f.Name) return appendHpackString(dst, f.Value) } // appendIndexedName appends f and index i referring indexed name // entry, as encoded in one of "Literal Header field - Indexed Name" // representation variants, to dst and returns the extended buffer. // // If f.Sensitive is true, "Never Indexed" representation is used. If // f.Sensitive is false and indexing is true, "Incremental Indexing" // representation is used. func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { first := len(dst) var n byte if indexing { n = 6 } else { n = 4 } dst = appendVarInt(dst, n, i) dst[first] |= encodeTypeByte(indexing, f.Sensitive) return appendHpackString(dst, f.Value) } // appendTableSize appends v, as encoded in "Header Table Size Update" // representation, to dst and returns the extended buffer. 
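// exampleEncodeHeaderBlock is an illustrative sketch, not part of the upstream
// file: it shows how a caller uses the Encoder defined above to build a header
// block fragment for a HEADERS frame. It assumes "bytes" is imported.
func exampleEncodeHeaderBlock() []byte {
	var buf bytes.Buffer
	enc := NewEncoder(&buf)
	enc.WriteField(HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(HeaderField{Name: ":path", Value: "/"})
	// Sensitive fields are emitted as "never indexed" so intermediaries won't cache them.
	enc.WriteField(HeaderField{Name: "cookie", Value: "secret", Sensitive: true})
	return buf.Bytes() // the header block fragment
}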
func appendTableSize(dst []byte, v uint32) []byte { first := len(dst) dst = appendVarInt(dst, 5, uint64(v)) dst[first] |= 0x20 return dst } // appendVarInt appends i, as encoded in variable integer form using n // bit prefix, to dst and returns the extended buffer. // // See // http://http2.github.io/http2-spec/compression.html#integer.representation func appendVarInt(dst []byte, n byte, i uint64) []byte { k := uint64((1 << n) - 1) if i < k { return append(dst, byte(i)) } dst = append(dst, byte(k)) i -= k for ; i >= 128; i >>= 7 { dst = append(dst, byte(0x80|(i&0x7f))) } return append(dst, byte(i)) } // appendHpackString appends s, as encoded in "String Literal" // representation, to dst and returns the the extended buffer. // // s will be encoded in Huffman codes only when it produces strictly // shorter byte string. func appendHpackString(dst []byte, s string) []byte { huffmanLength := HuffmanEncodeLength(s) if huffmanLength < uint64(len(s)) { first := len(dst) dst = appendVarInt(dst, 7, huffmanLength) dst = AppendHuffmanString(dst, s) dst[first] |= 0x80 } else { dst = appendVarInt(dst, 7, uint64(len(s))) dst = append(dst, s...) } return dst } // encodeTypeByte returns type byte. If sensitive is true, type byte // for "Never Indexed" representation is returned. If sensitive is // false and indexing is true, type byte for "Incremental Indexing" // representation is returned. Otherwise, type byte for "Without // Indexing" is returned. func encodeTypeByte(indexing, sensitive bool) byte { if sensitive { return 0x10 } if indexing { return 0x40 } return 0 } ================================================ FILE: vendor/github.com/bradfitz/http2/hpack/hpack.go ================================================ // Copyright 2014 The Go Authors. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE // Package hpack implements HPACK, a compression format for // efficiently representing HTTP header fields in the context of HTTP/2. // // See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 package hpack import ( "bytes" "errors" "fmt" ) // A DecodingError is something the spec defines as a decoding error. type DecodingError struct { Err error } func (de DecodingError) Error() string { return fmt.Sprintf("decoding error: %v", de.Err) } // An InvalidIndexError is returned when an encoder references a table // entry before the static table or after the end of the dynamic table. type InvalidIndexError int func (e InvalidIndexError) Error() string { return fmt.Sprintf("invalid indexed representation index %d", int(e)) } // A HeaderField is a name-value pair. Both the name and value are // treated as opaque sequences of octets. type HeaderField struct { Name, Value string // Sensitive means that this header field should never be // indexed. Sensitive bool } func (hf *HeaderField) size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's // length in octets (see Section 5.2), plus 32. The size of // an entry is calculated using the length of the name and // value without any Huffman encoding applied." 
// This can overflow if somebody makes a large HeaderField // Name and/or Value by hand, but we don't care, because that // won't happen on the wire because the encoding doesn't allow // it. return uint32(len(hf.Name) + len(hf.Value) + 32) } // A Decoder is the decoding context for incremental processing of // header blocks. type Decoder struct { dynTab dynamicTable emit func(f HeaderField) // buf is the unparsed buffer. It's only written to // saveBuf if it was truncated in the middle of a header // block. Because it's usually not owned, we can only // process it under Write. buf []byte // usually not owned saveBuf bytes.Buffer } func NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder { d := &Decoder{ emit: emitFunc, } d.dynTab.allowedMaxSize = maxSize d.dynTab.setMaxSize(maxSize) return d } // TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their // underlying buffers for garbage reasons. func (d *Decoder) SetMaxDynamicTableSize(v uint32) { d.dynTab.setMaxSize(v) } // SetAllowedMaxDynamicTableSize sets the upper bound that the encoded // stream (via dynamic table size updates) may set the maximum size // to. func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { d.dynTab.allowedMaxSize = v } type dynamicTable struct { // ents is the FIFO described at // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 // The newest (low index) is append at the end, and items are // evicted from the front. ents []HeaderField size uint32 maxSize uint32 // current maxSize allowedMaxSize uint32 // maxSize may go up to this, inclusive } func (dt *dynamicTable) setMaxSize(v uint32) { dt.maxSize = v dt.evict() } // TODO: change dynamicTable to be a struct with a slice and a size int field, // per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: // // // Then make add increment the size. maybe the max size should move from Decoder to // dynamicTable and add should return an ok bool if there was enough space. // // Later we'll need a remove operation on dynamicTable. func (dt *dynamicTable) add(f HeaderField) { dt.ents = append(dt.ents, f) dt.size += f.size() dt.evict() } // If we're too big, evict old stuff (front of the slice) func (dt *dynamicTable) evict() { base := dt.ents // keep base pointer of slice for dt.size > dt.maxSize { dt.size -= dt.ents[0].size() dt.ents = dt.ents[1:] } // Shift slice contents down if we evicted things. if len(dt.ents) != len(base) { copy(base, dt.ents) dt.ents = base[:len(dt.ents)] } } // constantTimeStringCompare compares string a and b in a constant // time manner. func constantTimeStringCompare(a, b string) bool { if len(a) != len(b) { return false } c := byte(0) for i := 0; i < len(a); i++ { c |= a[i] ^ b[i] } return c == 0 } // Search searches f in the table. The return value i is 0 if there is // no name match. If there is name match or name/value match, i is the // index of that entry (1-based). If both name and value match, // nameValueMatch becomes true. 
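// exampleDecodeHeaderBlock is an illustrative sketch, not part of the upstream
// file: it feeds a complete header block to the Decoder defined above. The two
// input bytes are the indexed representations of ":method: GET" (static table
// index 2) and ":path: /" (index 4); 4096 is the initial dynamic table size.
// It assumes "fmt" is imported.
func exampleDecodeHeaderBlock() error {
	d := NewDecoder(4096, func(f HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value) // ":method: GET", then ":path: /"
	})
	if _, err := d.Write([]byte{0x82, 0x84}); err != nil {
		return err
	}
	return d.Close() // reports an error if the block was truncated mid-field
}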
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { l := len(dt.ents) for j := l - 1; j >= 0; j-- { ent := dt.ents[j] if !constantTimeStringCompare(ent.Name, f.Name) { continue } if i == 0 { i = uint64(l - j) } if f.Sensitive { continue } if !constantTimeStringCompare(ent.Value, f.Value) { continue } i = uint64(l - j) nameValueMatch = true return } return } func (d *Decoder) maxTableIndex() int { return len(d.dynTab.ents) + len(staticTable) } func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { if i < 1 { return } if i > uint64(d.maxTableIndex()) { return } if i <= uint64(len(staticTable)) { return staticTable[i-1], true } dents := d.dynTab.ents return dents[len(dents)-(int(i)-len(staticTable))], true } // Decode decodes an entire block. // // TODO: remove this method and make it incremental later? This is // easier for debugging now. func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { var hf []HeaderField saveFunc := d.emit defer func() { d.emit = saveFunc }() d.emit = func(f HeaderField) { hf = append(hf, f) } if _, err := d.Write(p); err != nil { return nil, err } if err := d.Close(); err != nil { return nil, err } return hf, nil } func (d *Decoder) Close() error { if d.saveBuf.Len() > 0 { d.saveBuf.Reset() return DecodingError{errors.New("truncated headers")} } return nil } func (d *Decoder) Write(p []byte) (n int, err error) { if len(p) == 0 { // Prevent state machine CPU attacks (making us redo // work up to the point of finding out we don't have // enough data) return } // Only copy the data if we have to. Optimistically assume // that p will contain a complete header block. if d.saveBuf.Len() == 0 { d.buf = p } else { d.saveBuf.Write(p) d.buf = d.saveBuf.Bytes() d.saveBuf.Reset() } for len(d.buf) > 0 { err = d.parseHeaderFieldRepr() if err != nil { if err == errNeedMore { err = nil d.saveBuf.Write(d.buf) } break } } return len(p), err } // errNeedMore is an internal sentinel error value that means the // buffer is truncated and we need to read more data before we can // continue parsing. var errNeedMore = errors.New("need more data") type indexType int const ( indexedTrue indexType = iota indexedFalse indexedNever ) func (v indexType) indexed() bool { return v == indexedTrue } func (v indexType) sensitive() bool { return v == indexedNever } // returns errNeedMore if there isn't enough data available. // any other error is fatal. // consumes d.buf iff it returns nil. // precondition: must be called with len(d.buf) > 0 func (d *Decoder) parseHeaderFieldRepr() error { b := d.buf[0] switch { case b&128 != 0: // Indexed representation. // High bit set? // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 return d.parseFieldIndexed() case b&192 == 64: // 6.2.1 Literal Header Field with Incremental Indexing // 0b10xxxxxx: top two bits are 10 // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 return d.parseFieldLiteral(6, indexedTrue) case b&240 == 0: // 6.2.2 Literal Header Field without Indexing // 0b0000xxxx: top four bits are 0000 // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 return d.parseFieldLiteral(4, indexedFalse) case b&240 == 16: // 6.2.3 Literal Header Field never Indexed // 0b0001xxxx: top four bits are 0001 // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 return d.parseFieldLiteral(4, indexedNever) case b&224 == 32: // 6.3 Dynamic Table Size Update // Top three bits are '001'. 
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 return d.parseDynamicTableSizeUpdate() } return DecodingError{errors.New("invalid encoding")} } // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseFieldIndexed() error { buf := d.buf idx, buf, err := readVarInt(7, buf) if err != nil { return err } hf, ok := d.at(idx) if !ok { return DecodingError{InvalidIndexError(idx)} } d.emit(HeaderField{Name: hf.Name, Value: hf.Value}) d.buf = buf return nil } // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { buf := d.buf nameIdx, buf, err := readVarInt(n, buf) if err != nil { return err } var hf HeaderField if nameIdx > 0 { ihf, ok := d.at(nameIdx) if !ok { return DecodingError{InvalidIndexError(nameIdx)} } hf.Name = ihf.Name } else { hf.Name, buf, err = readString(buf) if err != nil { return err } } hf.Value, buf, err = readString(buf) if err != nil { return err } d.buf = buf if it.indexed() { d.dynTab.add(hf) } hf.Sensitive = it.sensitive() d.emit(hf) return nil } // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseDynamicTableSizeUpdate() error { buf := d.buf size, buf, err := readVarInt(5, buf) if err != nil { return err } if size > uint64(d.dynTab.allowedMaxSize) { return DecodingError{errors.New("dynamic table size update too large")} } d.dynTab.setMaxSize(uint32(size)) d.buf = buf return nil } var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} // readVarInt reads an unsigned variable length integer off the // beginning of p. n is the parameter as described in // http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. // // n must always be between 1 and 8. // // The returned remain buffer is either a smaller suffix of p, or err != nil. // The error is errNeedMore if p doesn't contain a complete integer. func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { if n < 1 || n > 8 { panic("bad n") } if len(p) == 0 { return 0, p, errNeedMore } i = uint64(p[0]) if n < 8 { i &= (1 << uint64(n)) - 1 } if i < (1<<uint64(n))-1 { return i, p[1:], nil } origP := p p = p[1:] var m uint64 for len(p) > 0 { b := p[0] p = p[1:] i += uint64(b&127) << m if b&128 == 0 { return i, p, nil } m += 7 if m >= 63 { // TODO: proper overflow check. making this up. return 0, origP, errVarintOverflow } } return 0, origP, errNeedMore } func readString(p []byte) (s string, remain []byte, err error) { if len(p) == 0 { return "", p, errNeedMore } isHuff := p[0]&128 != 0 strLen, p, err := readVarInt(7, p) if err != nil { return "", p, err } if uint64(len(p)) < strLen { return "", p, errNeedMore } if !isHuff { return string(p[:strLen]), p[strLen:], nil } // TODO: optimize this garbage: var buf bytes.Buffer if _, err := HuffmanDecode(&buf, p[:strLen]); err != nil { return "", nil, err } return buf.String(), p[strLen:], nil } ================================================ FILE: vendor/github.com/bradfitz/http2/hpack/huffman.go ================================================ // Copyright 2014 The Go Authors. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package hpack import ( "bytes" "io" "sync" ) var bufPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } // HuffmanDecode decodes the string in v and writes the expanded // result to w, returning the number of bytes written to w and the // Write call's return value. At most one Write call is made.
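// exampleHuffmanRoundTrip is an illustrative sketch, not part of the upstream
// file: it Huffman-encodes a string with AppendHuffmanString, checks the size
// reported by HuffmanEncodeLength, and expands it again with HuffmanDecode below.
// It assumes "fmt" is imported ("bytes" already is).
func exampleHuffmanRoundTrip() error {
	const s = "www.example.com" // 15 bytes raw, 12 bytes Huffman-coded
	enc := AppendHuffmanString(nil, s)
	fmt.Println(len(enc) == int(HuffmanEncodeLength(s))) // true

	var dec bytes.Buffer
	if _, err := HuffmanDecode(&dec, enc); err != nil {
		return err
	}
	fmt.Println(dec.String() == s) // true
	return nil
}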
func HuffmanDecode(w io.Writer, v []byte) (int, error) { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) n := rootHuffmanNode cur, nbits := uint(0), uint8(0) for _, b := range v { cur = cur<<8 | uint(b) nbits += 8 for nbits >= 8 { n = n.children[byte(cur>>(nbits-8))] if n.children == nil { buf.WriteByte(n.sym) nbits -= n.codeLen n = rootHuffmanNode } else { nbits -= 8 } } } for nbits > 0 { n = n.children[byte(cur<<(8-nbits))] if n.children != nil || n.codeLen > nbits { break } buf.WriteByte(n.sym) nbits -= n.codeLen n = rootHuffmanNode } return w.Write(buf.Bytes()) } type node struct { // children is non-nil for internal nodes children []*node // The following are only valid if children is nil: codeLen uint8 // number of bits that led to the output of sym sym byte // output symbol } func newInternalNode() *node { return &node{children: make([]*node, 256)} } var rootHuffmanNode = newInternalNode() func init() { for i, code := range huffmanCodes { if i > 255 { panic("too many huffman codes") } addDecoderNode(byte(i), code, huffmanCodeLen[i]) } } func addDecoderNode(sym byte, code uint32, codeLen uint8) { cur := rootHuffmanNode for codeLen > 8 { codeLen -= 8 i := uint8(code >> codeLen) if cur.children[i] == nil { cur.children[i] = newInternalNode() } cur = cur.children[i] } shift := 8 - codeLen start, end := int(uint8(code<<shift)), int(1<<shift) for i := start; i < start+end; i++ { cur.children[i] = &node{sym: sym, codeLen: codeLen} } } // AppendHuffmanString appends s, as encoded in Huffman codes, to dst // and returns the extended buffer. func AppendHuffmanString(dst []byte, s string) []byte { rembits := uint8(8) for i := 0; i < len(s); i++ { if rembits == 8 { dst = append(dst, 0) } dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) } if rembits < 8 { // special EOS symbol code := uint32(0x3fffffff) nbits := uint8(30) t := uint8(code >> (nbits - rembits)) dst[len(dst)-1] |= t } return dst } // HuffmanEncodeLength returns the number of bytes required to encode // s in Huffman codes. The result is round up to byte boundary. func HuffmanEncodeLength(s string) uint64 { n := uint64(0) for i := 0; i < len(s); i++ { n += uint64(huffmanCodeLen[s[i]]) } return (n + 7) / 8 } // appendByteToHuffmanCode appends Huffman code for c to dst and // returns the extended buffer and the remaining bits in the last // element. The appending is not byte aligned and the remaining bits // in the last element of dst is given in rembits. func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { code := huffmanCodes[c] nbits := huffmanCodeLen[c] for { if rembits > nbits { t := uint8(code << (rembits - nbits)) dst[len(dst)-1] |= t rembits -= nbits break } t := uint8(code >> (nbits - rembits)) dst[len(dst)-1] |= t nbits -= rembits rembits = 8 if nbits == 0 { break } dst = append(dst, 0) } return dst, rembits } ================================================ FILE: vendor/github.com/bradfitz/http2/hpack/tables.go ================================================ // Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package hpack func pair(name, value string) HeaderField { return HeaderField{Name: name, Value: value} } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B var staticTable = []HeaderField{ pair(":authority", ""), // index 1 (1-based) pair(":method", "GET"), pair(":method", "POST"), pair(":path", "/"), pair(":path", "/index.html"), pair(":scheme", "http"), pair(":scheme", "https"), pair(":status", "200"), pair(":status", "204"), pair(":status", "206"), pair(":status", "304"), pair(":status", "400"), pair(":status", "404"), pair(":status", "500"), pair("accept-charset", ""), pair("accept-encoding", "gzip, deflate"), pair("accept-language", ""), pair("accept-ranges", ""), pair("accept", ""), pair("access-control-allow-origin", ""), pair("age", ""), pair("allow", ""), pair("authorization", ""), pair("cache-control", ""), pair("content-disposition", ""), pair("content-encoding", ""), pair("content-language", ""), pair("content-length", ""), pair("content-location", ""), pair("content-range", ""), pair("content-type", ""), pair("cookie", ""), pair("date", ""), pair("etag", ""), pair("expect", ""), pair("expires", ""), pair("from", ""), pair("host", ""), pair("if-match", ""), pair("if-modified-since", ""), pair("if-none-match", ""), pair("if-range", ""), pair("if-unmodified-since", ""), pair("last-modified", ""), pair("link", ""), pair("location", ""), pair("max-forwards", ""), pair("proxy-authenticate", ""), pair("proxy-authorization", ""), pair("range", ""), pair("referer", ""), pair("refresh", ""), pair("retry-after", ""), pair("server", ""), pair("set-cookie", ""), pair("strict-transport-security", ""), pair("transfer-encoding", ""), pair("user-agent", ""), pair("vary", ""), pair("via", ""), pair("www-authenticate", ""), } var huffmanCodes = []uint32{ 0x1ff8, 0x7fffd8, 0xfffffe2, 0xfffffe3, 0xfffffe4, 0xfffffe5, 0xfffffe6, 0xfffffe7, 0xfffffe8, 0xffffea, 0x3ffffffc, 0xfffffe9, 0xfffffea, 0x3ffffffd, 0xfffffeb, 0xfffffec, 0xfffffed, 0xfffffee, 0xfffffef, 0xffffff0, 0xffffff1, 0xffffff2, 0x3ffffffe, 0xffffff3, 0xffffff4, 0xffffff5, 0xffffff6, 0xffffff7, 0xffffff8, 0xffffff9, 0xffffffa, 0xffffffb, 0x14, 0x3f8, 0x3f9, 0xffa, 0x1ff9, 0x15, 0xf8, 0x7fa, 0x3fa, 0x3fb, 0xf9, 0x7fb, 0xfa, 0x16, 0x17, 0x18, 0x0, 0x1, 0x2, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x5c, 0xfb, 0x7ffc, 0x20, 0xffb, 0x3fc, 0x1ffa, 0x21, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0xfc, 0x73, 0xfd, 0x1ffb, 0x7fff0, 0x1ffc, 0x3ffc, 0x22, 0x7ffd, 0x3, 0x23, 0x4, 0x24, 0x5, 0x25, 0x26, 0x27, 0x6, 0x74, 0x75, 0x28, 0x29, 0x2a, 0x7, 0x2b, 0x76, 0x2c, 0x8, 0x9, 0x2d, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7ffe, 0x7fc, 0x3ffd, 0x1ffd, 0xffffffc, 0xfffe6, 0x3fffd2, 0xfffe7, 0xfffe8, 0x3fffd3, 0x3fffd4, 0x3fffd5, 0x7fffd9, 0x3fffd6, 0x7fffda, 0x7fffdb, 0x7fffdc, 0x7fffdd, 0x7fffde, 0xffffeb, 0x7fffdf, 0xffffec, 0xffffed, 0x3fffd7, 0x7fffe0, 0xffffee, 0x7fffe1, 0x7fffe2, 0x7fffe3, 0x7fffe4, 0x1fffdc, 0x3fffd8, 0x7fffe5, 0x3fffd9, 0x7fffe6, 0x7fffe7, 0xffffef, 0x3fffda, 0x1fffdd, 0xfffe9, 0x3fffdb, 0x3fffdc, 0x7fffe8, 0x7fffe9, 0x1fffde, 0x7fffea, 0x3fffdd, 0x3fffde, 0xfffff0, 0x1fffdf, 0x3fffdf, 0x7fffeb, 0x7fffec, 0x1fffe0, 0x1fffe1, 0x3fffe0, 0x1fffe2, 0x7fffed, 0x3fffe1, 0x7fffee, 0x7fffef, 0xfffea, 0x3fffe2, 0x3fffe3, 0x3fffe4, 0x7ffff0, 0x3fffe5, 0x3fffe6, 
0x7ffff1, 0x3ffffe0, 0x3ffffe1, 0xfffeb, 0x7fff1, 0x3fffe7, 0x7ffff2, 0x3fffe8, 0x1ffffec, 0x3ffffe2, 0x3ffffe3, 0x3ffffe4, 0x7ffffde, 0x7ffffdf, 0x3ffffe5, 0xfffff1, 0x1ffffed, 0x7fff2, 0x1fffe3, 0x3ffffe6, 0x7ffffe0, 0x7ffffe1, 0x3ffffe7, 0x7ffffe2, 0xfffff2, 0x1fffe4, 0x1fffe5, 0x3ffffe8, 0x3ffffe9, 0xffffffd, 0x7ffffe3, 0x7ffffe4, 0x7ffffe5, 0xfffec, 0xfffff3, 0xfffed, 0x1fffe6, 0x3fffe9, 0x1fffe7, 0x1fffe8, 0x7ffff3, 0x3fffea, 0x3fffeb, 0x1ffffee, 0x1ffffef, 0xfffff4, 0xfffff5, 0x3ffffea, 0x7ffff4, 0x3ffffeb, 0x7ffffe6, 0x3ffffec, 0x3ffffed, 0x7ffffe7, 0x7ffffe8, 0x7ffffe9, 0x7ffffea, 0x7ffffeb, 0xffffffe, 0x7ffffec, 0x7ffffed, 0x7ffffee, 0x7ffffef, 0x7fffff0, 0x3ffffee, } var huffmanCodeLen = []uint8{ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, } ================================================ FILE: vendor/github.com/bradfitz/http2/http2.go ================================================ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE // Package http2 implements the HTTP/2 protocol. // // This is a work in progress. This package is low-level and intended // to be used directly by very few people. Most users will use it // indirectly through integration with the net/http package. See // ConfigureServer. That ConfigureServer call will likely be automatic // or available via an empty import in the future. // // See http://http2.github.io/ package http2 import ( "bufio" "fmt" "io" "net/http" "strconv" "sync" ) var VerboseLogs = false const ( // ClientPreface is the string that must be sent by new // connections from clients. ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // SETTINGS_MAX_FRAME_SIZE default // http://http2.github.io/http2-spec/#rfc.section.6.5.2 initialMaxFrameSize = 16384 // NextProtoTLS is the NPN/ALPN protocol negotiated during // HTTP/2's TLS setup. 
NextProtoTLS = "h2" // http://http2.github.io/http2-spec/#SettingValues initialHeaderTableSize = 4096 initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size defaultMaxReadFrameSize = 1 << 20 ) var ( clientPreface = []byte(ClientPreface) ) type streamState int const ( stateIdle streamState = iota stateOpen stateHalfClosedLocal stateHalfClosedRemote stateResvLocal stateResvRemote stateClosed ) var stateName = [...]string{ stateIdle: "Idle", stateOpen: "Open", stateHalfClosedLocal: "HalfClosedLocal", stateHalfClosedRemote: "HalfClosedRemote", stateResvLocal: "ResvLocal", stateResvRemote: "ResvRemote", stateClosed: "Closed", } func (st streamState) String() string { return stateName[st] } // Setting is a setting parameter: which setting it is, and its value. type Setting struct { // ID is which setting is being set. // See http://http2.github.io/http2-spec/#SettingValues ID SettingID // Val is the value. Val uint32 } func (s Setting) String() string { return fmt.Sprintf("[%v = %d]", s.ID, s.Val) } // Valid reports whether the setting is valid. func (s Setting) Valid() error { // Limits and error codes from 6.5.2 Defined SETTINGS Parameters switch s.ID { case SettingEnablePush: if s.Val != 1 && s.Val != 0 { return ConnectionError(ErrCodeProtocol) } case SettingInitialWindowSize: if s.Val > 1<<31-1 { return ConnectionError(ErrCodeFlowControl) } case SettingMaxFrameSize: if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } } return nil } // A SettingID is an HTTP/2 setting as defined in // http://http2.github.io/http2-spec/#iana-settings type SettingID uint16 const ( SettingHeaderTableSize SettingID = 0x1 SettingEnablePush SettingID = 0x2 SettingMaxConcurrentStreams SettingID = 0x3 SettingInitialWindowSize SettingID = 0x4 SettingMaxFrameSize SettingID = 0x5 SettingMaxHeaderListSize SettingID = 0x6 ) var settingName = map[SettingID]string{ SettingHeaderTableSize: "HEADER_TABLE_SIZE", SettingEnablePush: "ENABLE_PUSH", SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", SettingMaxFrameSize: "MAX_FRAME_SIZE", SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", } func (s SettingID) String() string { if v, ok := settingName[s]; ok { return v } return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) } func validHeader(v string) bool { if len(v) == 0 { return false } for _, r := range v { // "Just as in HTTP/1.x, header field names are // strings of ASCII characters that are compared in a // case-insensitive fashion. However, header field // names MUST be converted to lowercase prior to their // encoding in HTTP/2. " if r >= 127 || ('A' <= r && r <= 'Z') { return false } } return true } var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) func init() { for i := 100; i <= 999; i++ { if v := http.StatusText(i); v != "" { httpCodeStringCommon[i] = strconv.Itoa(i) } } } func httpCodeString(code int) string { if s, ok := httpCodeStringCommon[code]; ok { return s } return strconv.Itoa(code) } // from pkg io type stringWriter interface { WriteString(s string) (n int, err error) } // A gate lets two goroutines coordinate their activities. type gate chan struct{} func (g gate) Done() { g <- struct{}{} } func (g gate) Wait() { <-g } // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} // Init makes a closeWaiter usable. 
// It exists because so a closeWaiter value can be placed inside a // larger struct and have the Mutex and Cond's memory in the same // allocation. func (cw *closeWaiter) Init() { *cw = make(chan struct{}) } // Close marks the closeWaiter as closed and unblocks any waiters. func (cw closeWaiter) Close() { close(cw) } // Wait waits for the closeWaiter to become closed. func (cw closeWaiter) Wait() { <-cw } // bufferedWriter is a buffered writer that writes to w. // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { w io.Writer // immutable bw *bufio.Writer // non-nil when data is buffered } func newBufferedWriter(w io.Writer) *bufferedWriter { return &bufferedWriter{w: w} } var bufWriterPool = sync.Pool{ New: func() interface{} { // TODO: pick something better? this is a bit under // (3 x typical 1500 byte MTU) at least. return bufio.NewWriterSize(nil, 4<<10) }, } func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) bw.Reset(w.w) w.bw = bw } return w.bw.Write(p) } func (w *bufferedWriter) Flush() error { bw := w.bw if bw == nil { return nil } err := bw.Flush() bw.Reset(nil) bufWriterPool.Put(bw) w.bw = nil return err } ================================================ FILE: vendor/github.com/bradfitz/http2/pipe.go ================================================ // Copyright 2014 The Go Authors. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package http2 import ( "sync" ) type pipe struct { b buffer c sync.Cond m sync.Mutex } // Read waits until data is available and copies bytes // from the buffer into p. func (r *pipe) Read(p []byte) (n int, err error) { r.c.L.Lock() defer r.c.L.Unlock() for r.b.Len() == 0 && !r.b.closed { r.c.Wait() } return r.b.Read(p) } // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. func (w *pipe) Write(p []byte) (n int, err error) { w.c.L.Lock() defer w.c.L.Unlock() defer w.c.Signal() return w.b.Write(p) } func (c *pipe) Close(err error) { c.c.L.Lock() defer c.c.L.Unlock() defer c.c.Signal() c.b.Close(err) } ================================================ FILE: vendor/github.com/bradfitz/http2/server.go ================================================ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE // TODO: replace all <-sc.doneServing with reads from the stream's cw // instead, and make sure that on close we close all open // streams. then remove doneServing? // TODO: finish GOAWAY support. Consider each incoming frame type and // whether it should be ignored during a shutdown race. // TODO: disconnect idle clients. GFE seems to do 4 minutes. make // configurable? or maximum number of idle clients and remove the // oldest? // TODO: turn off the serve goroutine when idle, so // an idle conn only has the readFrames goroutine active. (which could // also be optimized probably to pin less memory in crypto/tls). This // would involve tracking when the serve goroutine is active (atomic // int32 read/CAS probably?) 
and starting it up when frames arrive, // and shutting it down when all handlers exit. the occasional PING // packets could use time.AfterFunc to call sc.wakeStartServeLoop() // (which is a no-op if already running) and then queue the PING write // as normal. The serve loop would then exit in most cases (if no // Handlers running) and not be woken up again until the PING packet // returns. // TODO (maybe): add a mechanism for Handlers to going into // half-closed-local mode (rw.(io.Closer) test?) but not exit their // handler, and continue to be able to read from the // Request.Body. This would be a somewhat semantic change from HTTP/1 // (or at least what we expose in net/http), so I'd probably want to // add it there too. For now, this package says that returning from // the Handler ServeHTTP function means you're both done reading and // done writing, without a way to stop just one or the other. package http2 import ( "bufio" "bytes" "crypto/tls" "errors" "fmt" "io" "log" "net" "net/http" "net/url" "strconv" "strings" "sync" "time" "github.com/bradfitz/http2/hpack" ) const ( prefaceTimeout = 10 * time.Second firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway handlerChunkWriteSize = 4 << 10 defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? ) var ( errClientDisconnected = errors.New("client disconnected") errClosedBody = errors.New("body closed by handler") errStreamBroken = errors.New("http2: stream broken") ) var responseWriterStatePool = sync.Pool{ New: func() interface{} { rws := &responseWriterState{} rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) return rws }, } // Test hooks. var ( testHookOnConn func() testHookGetServerConn func(*serverConn) testHookOnPanicMu *sync.Mutex // nil except in tests testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) ) // Server is an HTTP/2 server. type Server struct { // MaxHandlers limits the number of http.Handler ServeHTTP goroutines // which may run at a time over all connections. // Negative or zero no limit. // TODO: implement MaxHandlers int // MaxConcurrentStreams optionally specifies the number of // concurrent streams that each client may have open at a // time. This is unrelated to the number of http.Handler goroutines // which may be active globally, which is MaxHandlers. // If zero, MaxConcurrentStreams defaults to at least 100, per // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. If zero or otherwise invalid, a // default value is used. MaxReadFrameSize uint32 // PermitProhibitedCipherSuites, if true, permits the use of // cipher suites prohibited by the HTTP/2 spec. PermitProhibitedCipherSuites bool } func (s *Server) maxReadFrameSize() uint32 { if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { return v } return defaultMaxReadFrameSize } func (s *Server) maxConcurrentStreams() uint32 { if v := s.MaxConcurrentStreams; v > 0 { return v } return defaultMaxStreams } // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. // // ConfigureServer must be called before s begins serving. 
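// Usage sketch (hedged; myHandler, the address, and the certificate paths
// below are placeholders, not part of this package):
//
//	srv := &http.Server{Addr: ":443", Handler: myHandler}
//	http2.ConfigureServer(srv, &http2.Server{MaxConcurrentStreams: 250})
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))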
func ConfigureServer(s *http.Server, conf *Server) { if conf == nil { conf = new(Server) } if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) } // Note: not setting MinVersion to tls.VersionTLS12, // as we don't want to interfere with HTTP/1.1 traffic // on the user's server. We enforce TLS 1.2 later once // we accept a connection. Ideally this should be done // during next-proto selection, but using TLS <1.2 with // HTTP/2 is still the client's bug. // Be sure we advertise tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 // at least. // TODO: enable PreferServerCipherSuites? if s.TLSConfig.CipherSuites != nil { const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 haveRequired := false for _, v := range s.TLSConfig.CipherSuites { if v == requiredCipher { haveRequired = true break } } if !haveRequired { s.TLSConfig.CipherSuites = append(s.TLSConfig.CipherSuites, requiredCipher) } } haveNPN := false for _, p := range s.TLSConfig.NextProtos { if p == NextProtoTLS { haveNPN = true break } } if !haveNPN { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) } // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers // to switch to "h2". s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { if testHookOnConn != nil { testHookOnConn() } conf.handleConn(hs, c, h) } s.TLSNextProto[NextProtoTLS] = protoHandler s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. } func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { sc := &serverConn{ srv: srv, hs: hs, conn: c, remoteAddrStr: c.RemoteAddr().String(), bw: newBufferedWriter(c), handler: h, streams: make(map[uint32]*stream), readFrameCh: make(chan frameAndGate), readFrameErrCh: make(chan error, 1), // must be buffered for 1 wantWriteFrameCh: make(chan frameWriteMsg, 8), wroteFrameCh: make(chan struct{}, 1), // buffered; one send in reading goroutine bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), advMaxStreams: srv.maxConcurrentStreams(), writeSched: writeScheduler{ maxFrameSize: initialMaxFrameSize, }, initialWindowSize: initialWindowSize, headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, } sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField) fr := NewFramer(sc.bw, c) fr.SetMaxReadFrameSize(srv.maxReadFrameSize()) sc.framer = fr if tc, ok := c.(*tls.Conn); ok { sc.tlsState = new(tls.ConnectionState) *sc.tlsState = tc.ConnectionState() // 9.2 Use of TLS Features // An implementation of HTTP/2 over TLS MUST use TLS // 1.2 or higher with the restrictions on feature set // and cipher suite described in this section. Due to // implementation limitations, it might not be // possible to fail TLS negotiation. An endpoint MUST // immediately terminate an HTTP/2 connection that // does not meet the TLS requirements described in // this section with a connection error (Section // 5.4.1) of type INADEQUATE_SECURITY. 
if sc.tlsState.Version < tls.VersionTLS12 { sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") return } if sc.tlsState.ServerName == "" { // Client must use SNI, but we don't enforce that anymore, // since it was causing problems when connecting to bare IP // addresses during development. // // TODO: optionally enforce? Or enforce at the time we receive // a new request, and verify the the ServerName matches the :authority? // But that precludes proxy situations, perhaps. // // So for now, do nothing here again. } if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." // // We choose that. In my opinion, the spec is weak // here. It also says both parties must support at least // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no // excuses here. If we really must, we could allow an // "AllowInsecureWeakCiphers" option on the server later. // Let's see how it plays out first. sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) return } } if hook := testHookGetServerConn; hook != nil { hook(sc) } sc.serve() } // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. func isBadCipher(cipher uint16) bool { switch cipher { case tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: // Reject cipher suites from Appendix A. // "This list includes those cipher suites that do not // offer an ephemeral key exchange and those that are // based on the TLS null, stream or block cipher type" return true default: return false } } func (sc *serverConn) rejectConn(err ErrCode, debug string) { log.Printf("REJECTING conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. sc.framer.WriteGoAway(0, err, []byte(debug)) sc.bw.Flush() sc.conn.Close() } // frameAndGates coordinates the readFrames and serve // goroutines. Because the Framer interface only permits the most // recently-read Frame from being accessed, the readFrames goroutine // blocks until it has a frame, passes it to serve, and then waits for // serve to be done with it before reading the next one. 
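// Gate handshake sketch (hedged; f stands for the frame just read):
//
//	g := make(gate, 1)
//	sc.readFrameCh <- frameAndGate{f, g}
//	// ...the serve goroutine processes f, then calls g.Done()...
//	g.Wait() // only now is it safe to let the Framer reuse its shared buffer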
type frameAndGate struct { f Frame g gate } type serverConn struct { // Immutable: srv *Server hs *http.Server conn net.Conn bw *bufferedWriter // writing to conn handler http.Handler framer *Framer hpackDecoder *hpack.Decoder doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan frameAndGate // written by serverConn.readFrames readFrameErrCh chan error wantWriteFrameCh chan frameWriteMsg // from handlers -> serve wroteFrameCh chan struct{} // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan bodyReadMsg // from handlers -> serve testHookCh chan func() // code to run on the serve loop flow flow // conn-wide (not stream-specific) outbound flow control inflow flow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curOpenStreams uint32 // client's number of open streams maxStreamID uint32 // max ever seen streams map[uint32]*stream initialWindowSize int32 headerTableSize uint32 maxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case req requestParam // non-zero while reading request headers writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush writeSched writeScheduler inGoAway bool // we've started to or sent GOAWAY needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder } // requestParam is the state of the next request, initialized over // potentially several frames HEADERS + zero or more CONTINUATION // frames. type requestParam struct { // stream is non-nil if we're reading (HEADER or CONTINUATION) // frames for a request (but not DATA). stream *stream header http.Header method, path string scheme, authority string sawRegularHeader bool // saw a non-pseudo header already invalidHeader bool // an invalid header was seen } // stream represents a stream. This is the minimal metadata needed by // the serve goroutine. Most of the actual stream state is owned by // the http.Handler's goroutine in the responseWriter. Because the // responseWriter's responseWriterState is recycled at the end of a // handler, this struct intentionally has no pointer to the // *responseWriter{,State} itself, as the Handler ending nils out the // responseWriter's state field. 
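// Lifecycle sketch (hedged summary of the transitions implemented below):
// a HEADERS frame opens the stream (stateOpen, or stateHalfClosedRemote if it
// carried END_STREAM); a DATA frame with END_STREAM moves it to
// stateHalfClosedRemote; the handler finishing, a RST_STREAM, or connection
// shutdown moves it to stateClosed via closeStream.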
type stream struct { // immutable: id uint32 body *pipe // non-nil if expecting DATA frames cw closeWaiter // closed wait stream transitions to closed state // owned by serverConn's serve loop: bodyBytes int64 // body bytes seen so far declBodyBytes int64 // or -1 if undeclared flow flow // limits writing from Handler to client inflow flow // what the client is allowed to POST/etc to us parent *stream // or nil weight uint8 state streamState sentReset bool // only true once detached from streams map gotReset bool // only true once detacted from streams map } func (sc *serverConn) Framer() *Framer { return sc.framer } func (sc *serverConn) CloseConn() error { return sc.conn.Close() } func (sc *serverConn) Flush() error { return sc.bw.Flush() } func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { return sc.hpackEncoder, &sc.headerWriteBuf } func (sc *serverConn) state(streamID uint32) (streamState, *stream) { sc.serveG.check() // http://http2.github.io/http2-spec/#rfc.section.5.1 if st, ok := sc.streams[streamID]; ok { return st.state, st } // "The first use of a new stream identifier implicitly closes all // streams in the "idle" state that might have been initiated by // that peer with a lower-valued stream identifier. For example, if // a client sends a HEADERS frame on stream 7 without ever sending a // frame on stream 5, then stream 5 transitions to the "closed" // state when the first frame for stream 7 is sent or received." if streamID <= sc.maxStreamID { return stateClosed, nil } return stateIdle, nil } func (sc *serverConn) vlogf(format string, args ...interface{}) { if VerboseLogs { sc.logf(format, args...) } } func (sc *serverConn) logf(format string, args ...interface{}) { if lg := sc.hs.ErrorLog; lg != nil { lg.Printf(format, args...) } else { log.Printf(format, args...) } } func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } str := err.Error() if err == io.EOF || strings.Contains(str, "use of closed network connection") { // Boring, expected errors. sc.vlogf(format, args...) } else { sc.logf(format, args...) } } func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) { sc.serveG.check() sc.vlogf("got header field %+v", f) switch { case !validHeader(f.Name): sc.req.invalidHeader = true case strings.HasPrefix(f.Name, ":"): if sc.req.sawRegularHeader { sc.logf("pseudo-header after regular header") sc.req.invalidHeader = true return } var dst *string switch f.Name { case ":method": dst = &sc.req.method case ":path": dst = &sc.req.path case ":scheme": dst = &sc.req.scheme case ":authority": dst = &sc.req.authority default: // 8.1.2.1 Pseudo-Header Fields // "Endpoints MUST treat a request or response // that contains undefined or invalid // pseudo-header fields as malformed (Section // 8.1.2.6)." 
sc.logf("invalid pseudo-header %q", f.Name) sc.req.invalidHeader = true return } if *dst != "" { sc.logf("duplicate pseudo-header %q sent", f.Name) sc.req.invalidHeader = true return } *dst = f.Value case f.Name == "cookie": sc.req.sawRegularHeader = true if s, ok := sc.req.header["Cookie"]; ok && len(s) == 1 { s[0] = s[0] + "; " + f.Value } else { sc.req.header.Add("Cookie", f.Value) } default: sc.req.sawRegularHeader = true sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value) } } func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := commonCanonHeader[v] if ok { return cv } cv, ok = sc.canonHeader[v] if ok { return cv } if sc.canonHeader == nil { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) sc.canonHeader[v] = cv return cv } // readFrames is the loop that reads incoming frames. // It's run on its own goroutine. func (sc *serverConn) readFrames() { g := make(gate, 1) for { f, err := sc.framer.ReadFrame() if err != nil { sc.readFrameErrCh <- err close(sc.readFrameCh) return } sc.readFrameCh <- frameAndGate{f, g} // We can't read another frame until this one is // processed, as the ReadFrame interface doesn't copy // memory. The Frame accessor methods access the last // frame's (shared) buffer. So we wait for the // serve goroutine to tell us it's done: g.Wait() } } // writeFrameAsync runs in its own goroutine and writes a single frame // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { err := wm.write.writeFrame(sc) if ch := wm.done; ch != nil { select { case ch <- err: default: panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) } } sc.wroteFrameCh <- struct{}{} // tickle frame selection scheduler } func (sc *serverConn) closeAllStreamsOnConnClose() { sc.serveG.check() for _, st := range sc.streams { sc.closeStream(st, errClientDisconnected) } } func (sc *serverConn) stopShutdownTimer() { sc.serveG.check() if t := sc.shutdownTimer; t != nil { t.Stop() } } func (sc *serverConn) notePanic() { if testHookOnPanicMu != nil { testHookOnPanicMu.Lock() defer testHookOnPanicMu.Unlock() } if testHookOnPanic != nil { if e := recover(); e != nil { if testHookOnPanic(sc, e) { panic(e) } } } } func (sc *serverConn) serve() { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() defer sc.closeAllStreamsOnConnClose() defer sc.stopShutdownTimer() defer close(sc.doneServing) // unblocks handlers trying to send sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) sc.writeFrame(frameWriteMsg{ write: writeSettings{ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, // TODO: more actual settings, notably // SettingInitialWindowSize, but then we also // want to bump up the conn window size the // same amount here right after the settings }, }) sc.unackedSettings++ if err := sc.readPreface(); err != nil { sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := time.NewTimer(firstSettingsTimeout) for { select { case wm := <-sc.wantWriteFrameCh: sc.writeFrame(wm) case <-sc.wroteFrameCh: if sc.writingFrame != true { panic("internal error: expected to be already writing a frame") } sc.writingFrame = false sc.scheduleFrameWrite() case fg, ok := <-sc.readFrameCh: if !ok { sc.readFrameCh = nil } if 
!sc.processFrameFromReader(fg, ok) { return } if settingsTimer.C != nil { settingsTimer.Stop() settingsTimer.C = nil } case m := <-sc.bodyReadCh: sc.noteBodyRead(m.st, m.n) case <-settingsTimer.C: sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) return case <-sc.shutdownTimerCh: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return case fn := <-sc.testHookCh: fn() } } } // readPreface reads the ClientPreface greeting from the peer // or returns an error on timeout or an invalid greeting. func (sc *serverConn) readPreface() error { errc := make(chan error, 1) go func() { // Read the client preface buf := make([]byte, len(ClientPreface)) if _, err := io.ReadFull(sc.conn, buf); err != nil { errc <- err } else if !bytes.Equal(buf, clientPreface) { errc <- fmt.Errorf("bogus greeting %q", buf) } else { errc <- nil } }() timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { case <-timer.C: return errors.New("timeout waiting for client preface") case err := <-errc: if err == nil { sc.vlogf("client %v said hello", sc.conn.RemoteAddr()) } return err } } // writeDataFromHandler writes the data described in req to stream.id. // // The provided ch is used to avoid allocating new channels for each // write operation. It's expected that the caller reuses writeData and ch // over time. // // The flow control currently happens in the Handler where it waits // for 1 or more bytes to be available to then write here. So at this // point we know that we have flow control. But this might have to // change when priority is implemented, so the serve goroutine knows // the total amount of bytes waiting to be sent and can can have more // scheduling decisions available. func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData, ch chan error) error { sc.writeFrameFromHandler(frameWriteMsg{ write: writeData, stream: stream, done: ch, }) select { case err := <-ch: return err case <-sc.doneServing: return errClientDisconnected case <-stream.cw: return errStreamBroken } } // writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts // if the connection has gone away. // // This must not be run from the serve goroutine itself, else it might // deadlock writing to sc.wantWriteFrameCh (which is only mildly // buffered and is read by serve itself). If you're on the serve // goroutine, call writeFrame instead. func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) { sc.serveG.checkNotOn() // NOT select { case sc.wantWriteFrameCh <- wm: case <-sc.doneServing: // Client has closed their connection to the server. } } // writeFrame schedules a frame to write and sends it if there's nothing // already being written. // // There is no pushback here (the serve goroutine never blocks). It's // the http.Handlers that block, waiting for their previous frames to // make it onto the wire // // If you're not on the serve goroutine, use writeFrameFromHandler instead. func (sc *serverConn) writeFrame(wm frameWriteMsg) { sc.serveG.check() sc.writeSched.add(wm) sc.scheduleFrameWrite() } // startFrameWrite starts a goroutine to write wm (in a separate // goroutine since that might block on the network), and updates the // serve goroutine's state about the world, updated from info in wm. 
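// Write-path sketch (hedged) for the two entry points documented above;
// wf, st, and ch are placeholders:
//
//	// from a handler goroutine:
//	sc.writeFrameFromHandler(frameWriteMsg{write: wf, stream: st, done: ch})
//	// from the serve goroutine itself:
//	sc.writeFrame(frameWriteMsg{write: wf, stream: st})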
func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { sc.serveG.check() if sc.writingFrame { panic("internal error: can only be writing one frame at a time") } sc.writingFrame = true st := wm.stream if st != nil { switch st.state { case stateHalfClosedLocal: panic("internal error: attempt to send frame on half-closed-local stream") case stateClosed: if st.sentReset || st.gotReset { // Skip this frame. But fake the frame write to reschedule: sc.wroteFrameCh <- struct{}{} return } panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) } } sc.needsFrameFlush = true if endsStream(wm.write) { if st == nil { panic("internal error: expecting non-nil stream") } switch st.state { case stateOpen: // Here we would go to stateHalfClosedLocal in // theory, but since our handler is done and // the net/http package provides no mechanism // for finishing writing to a ResponseWriter // while still reading data (see possible TODO // at top of this file), we go into closed // state here anyway, after telling the peer // we're hanging up on them. st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream errCancel := StreamError{st.id, ErrCodeCancel} sc.resetStream(errCancel) case stateHalfClosedRemote: sc.closeStream(st, nil) } } go sc.writeFrameAsync(wm) } // scheduleFrameWrite tickles the frame writing scheduler. // // If a frame is already being written, nothing happens. This will be called again // when the frame is done being written. // // If a frame isn't being written we need to send one, the best frame // to send is selected, preferring first things that aren't // stream-specific (e.g. ACKing settings), and then finding the // highest priority stream. // // If a frame isn't being written and there's nothing else to send, we // flush the write buffer. func (sc *serverConn) scheduleFrameWrite() { sc.serveG.check() if sc.writingFrame { return } if sc.needToSendGoAway { sc.needToSendGoAway = false sc.startFrameWrite(frameWriteMsg{ write: &writeGoAway{ maxStreamID: sc.maxStreamID, code: sc.goAwayCode, }, }) return } if sc.needToSendSettingsAck { sc.needToSendSettingsAck = false sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) return } if !sc.inGoAway { if wm, ok := sc.writeSched.take(); ok { sc.startFrameWrite(wm) return } } if sc.needsFrameFlush { sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) sc.needsFrameFlush = false // after startFrameWrite, since it sets this true return } } func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() if sc.inGoAway { return } if code != ErrCodeNo { sc.shutDownIn(250 * time.Millisecond) } else { // TODO: configurable sc.shutDownIn(1 * time.Second) } sc.inGoAway = true sc.needToSendGoAway = true sc.goAwayCode = code sc.scheduleFrameWrite() } func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() sc.shutdownTimer = time.NewTimer(d) sc.shutdownTimerCh = sc.shutdownTimer.C } func (sc *serverConn) resetStream(se StreamError) { sc.serveG.check() sc.writeFrame(frameWriteMsg{write: se}) if st, ok := sc.streams[se.StreamID]; ok { st.sentReset = true sc.closeStream(st, se) } } // curHeaderStreamID returns the stream ID of the header block we're // currently in the middle of reading. If this returns non-zero, the // next frame must be a CONTINUATION with this stream id. 
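// Continuation-tracking sketch (hedged paraphrase of the check in
// processFrame below): while this returns a non-zero ID, only a CONTINUATION
// frame for that same stream is legal.
//
//	if s := sc.curHeaderStreamID(); s != 0 {
//		if cf, ok := f.(*ContinuationFrame); !ok || cf.Header().StreamID != s {
//			return ConnectionError(ErrCodeProtocol)
//		}
//	}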
func (sc *serverConn) curHeaderStreamID() uint32 { sc.serveG.check() st := sc.req.stream if st == nil { return 0 } return st.id } // processFrameFromReader processes the serve loop's read from readFrameCh from the // frame-reading goroutine. // processFrameFromReader returns whether the connection should be kept open. func (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool { sc.serveG.check() var clientGone bool var err error if !fgValid { err = <-sc.readFrameErrCh if err == ErrFrameTooLarge { sc.goAway(ErrCodeFrameSize) return true // goAway will close the loop } clientGone = err == io.EOF || strings.Contains(err.Error(), "use of closed network connection") if clientGone { // TODO: could we also get into this state if // the peer does a half close // (e.g. CloseWrite) because they're done // sending frames but they're still wanting // our open replies? Investigate. // TODO: add CloseWrite to crypto/tls.Conn first // so we have a way to test this? I suppose // just for testing we could have a non-TLS mode. return false } } if fgValid { f := fg.f sc.vlogf("got %v: %#v", f.Header(), f) err = sc.processFrame(f) fg.g.Done() // unblock the readFrames goroutine if err == nil { return true } } switch ev := err.(type) { case StreamError: sc.resetStream(ev) return true case goAwayFlowError: sc.goAway(ErrCodeFlowControl) return true case ConnectionError: sc.logf("%v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown default: if !fgValid { sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) } else { sc.logf("disconnection due to other error: %v", err) } } return false } func (sc *serverConn) processFrame(f Frame) error { sc.serveG.check() // First frame received must be SETTINGS. if !sc.sawFirstSettings { if _, ok := f.(*SettingsFrame); !ok { return ConnectionError(ErrCodeProtocol) } sc.sawFirstSettings = true } if s := sc.curHeaderStreamID(); s != 0 { if cf, ok := f.(*ContinuationFrame); !ok { return ConnectionError(ErrCodeProtocol) } else if cf.Header().StreamID != s { return ConnectionError(ErrCodeProtocol) } } switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) case *HeadersFrame: return sc.processHeaders(f) case *ContinuationFrame: return sc.processContinuation(f) case *WindowUpdateFrame: return sc.processWindowUpdate(f) case *PingFrame: return sc.processPing(f) case *DataFrame: return sc.processData(f) case *RSTStreamFrame: return sc.processResetStream(f) case *PriorityFrame: return sc.processPriority(f) case *PushPromiseFrame: // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. return ConnectionError(ErrCodeProtocol) default: log.Printf("Ignoring frame: %v", f.Header()) return nil } } func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.Flags.Has(FlagSettingsAck) { // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil } if f.StreamID != 0 { // "PING frames are not associated with any individual // stream. If a PING frame is received with a stream // identifier field value other than 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR." 
return ConnectionError(ErrCodeProtocol) } sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) return nil } func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { sc.serveG.check() switch { case f.StreamID != 0: // stream-level flow control st := sc.streams[f.StreamID] if st == nil { // "WINDOW_UPDATE can be sent by a peer that has sent a // frame bearing the END_STREAM flag. This means that a // receiver could receive a WINDOW_UPDATE frame on a "half // closed (remote)" or "closed" stream. A receiver MUST // NOT treat this as an error, see Section 5.1." return nil } if !st.flow.add(int32(f.Increment)) { return StreamError{f.StreamID, ErrCodeFlowControl} } default: // connection-level flow control if !sc.flow.add(int32(f.Increment)) { return goAwayFlowError{} } } sc.scheduleFrameWrite() return nil } func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { sc.serveG.check() state, st := sc.state(f.StreamID) if state == stateIdle { // 6.4 "RST_STREAM frames MUST NOT be sent for a // stream in the "idle" state. If a RST_STREAM frame // identifying an idle stream is received, the // recipient MUST treat this as a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. return ConnectionError(ErrCodeProtocol) } if st != nil { st.gotReset = true sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) } return nil } func (sc *serverConn) closeStream(st *stream, err error) { sc.serveG.check() if st.state == stateIdle || st.state == stateClosed { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed sc.curOpenStreams-- delete(sc.streams, st.id) if p := st.body; p != nil { p.Close(err) } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.forgetStream(st.id) } func (sc *serverConn) processSettings(f *SettingsFrame) error { sc.serveG.check() if f.IsAck() { sc.unackedSettings-- if sc.unackedSettings < 0 { // Why is the peer ACKing settings we never sent? // The spec doesn't mention this case, but // hang up on them anyway. return ConnectionError(ErrCodeProtocol) } return nil } if err := f.ForeachSetting(sc.processSetting); err != nil { return err } sc.needToSendSettingsAck = true sc.scheduleFrameWrite() return nil } func (sc *serverConn) processSetting(s Setting) error { sc.serveG.check() if err := s.Valid(); err != nil { return err } sc.vlogf("processing setting %v", s) switch s.ID { case SettingHeaderTableSize: sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case SettingEnablePush: sc.pushEnabled = s.Val != 0 case SettingMaxConcurrentStreams: sc.clientMaxStreams = s.Val case SettingInitialWindowSize: return sc.processSettingInitialWindowSize(s.Val) case SettingMaxFrameSize: sc.writeSched.maxFrameSize = s.Val case SettingMaxHeaderListSize: sc.maxHeaderListSize = s.Val default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST // ignore that setting." } return nil } func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { sc.serveG.check() // Note: val already validated to be within range by // processSetting's Valid call. // "A SETTINGS frame can alter the initial flow control window // size for all current streams. When the value of // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST // adjust the size of all stream flow control windows that it // maintains by the difference between the new value and the // old value." 
old := sc.initialWindowSize sc.initialWindowSize = int32(val) growth := sc.initialWindowSize - old // may be negative for _, st := range sc.streams { if !st.flow.add(growth) { // 6.9.2 Initial Flow Control Window Size // "An endpoint MUST treat a change to // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow // control window to exceed the maximum size as a // connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR." return ConnectionError(ErrCodeFlowControl) } } return nil } func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() // "If a DATA frame is received whose stream is not in "open" // or "half closed (local)" state, the recipient MUST respond // with a stream error (Section 5.4.2) of type STREAM_CLOSED." id := f.Header().StreamID st, ok := sc.streams[id] if !ok || st.state != stateOpen { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that // the http.Handler returned, so it's done reading & // done writing). Try to stop the client from sending // more DATA. return StreamError{id, ErrCodeStreamClosed} } if st.body == nil { panic("internal error: should have a body in this state") } data := f.Data() // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.Close(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) return StreamError{id, ErrCodeStreamClosed} } if len(data) > 0 { // Check whether the client has flow control quota. if int(st.inflow.available()) < len(data) { return StreamError{id, ErrCodeFlowControl} } st.inflow.take(int32(len(data))) wrote, err := st.body.Write(data) if err != nil { return StreamError{id, ErrCodeStreamClosed} } if wrote != len(data) { panic("internal error: bad Writer") } st.bodyBytes += int64(len(data)) } if f.StreamEnded() { if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { st.body.Close(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", st.declBodyBytes, st.bodyBytes)) } else { st.body.Close(io.EOF) } st.state = stateHalfClosedRemote } return nil } func (sc *serverConn) processHeaders(f *HeadersFrame) error { sc.serveG.check() id := f.Header().StreamID if sc.inGoAway { // Ignore. return nil } // http://http2.github.io/http2-spec/#rfc.section.5.1.1 if id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil { // Streams initiated by a client MUST use odd-numbered // stream identifiers. [...] The identifier of a newly // established stream MUST be numerically greater than all // streams that the initiating endpoint has opened or // reserved. [...] An endpoint that receives an unexpected // stream identifier MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. 
return ConnectionError(ErrCodeProtocol) } if id > sc.maxStreamID { sc.maxStreamID = id } st := &stream{ id: id, state: stateOpen, } if f.StreamEnded() { st.state = stateHalfClosedRemote } st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialWindowSize) st.inflow.conn = &sc.inflow // link to conn-level counter st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings sc.streams[id] = st if f.HasPriority() { adjustStreamPriority(sc.streams, st.id, f.Priority) } sc.curOpenStreams++ sc.req = requestParam{ stream: st, header: make(http.Header), } return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) } func (sc *serverConn) processContinuation(f *ContinuationFrame) error { sc.serveG.check() st := sc.streams[f.Header().StreamID] if st == nil || sc.curHeaderStreamID() != st.id { return ConnectionError(ErrCodeProtocol) } return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) } func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { sc.serveG.check() if _, err := sc.hpackDecoder.Write(frag); err != nil { // TODO: convert to stream error I assume? return err } if !end { return nil } if err := sc.hpackDecoder.Close(); err != nil { // TODO: convert to stream error I assume? return err } defer sc.resetPendingRequest() if sc.curOpenStreams > sc.advMaxStreams { // "Endpoints MUST NOT exceed the limit set by their // peer. An endpoint that receives a HEADERS frame // that causes their advertised concurrent stream // limit to be exceeded MUST treat this as a stream // error (Section 5.4.2) of type PROTOCOL_ERROR or // REFUSED_STREAM." if sc.unackedSettings == 0 { // They should know better. return StreamError{st.id, ErrCodeProtocol} } // Assume it's a network race, where they just haven't // received our last SETTINGS update. But actually // this can't happen yet, because we don't yet provide // a way for users to adjust server parameters at // runtime. return StreamError{st.id, ErrCodeRefusedStream} } rw, req, err := sc.newWriterAndRequest() if err != nil { return err } st.body = req.Body.(*requestBody).pipe // may be nil st.declBodyBytes = req.ContentLength go sc.runHandler(rw, req) return nil } func (sc *serverConn) processPriority(f *PriorityFrame) error { adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) return nil } func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { st, ok := streams[streamID] if !ok { // TODO: not quite correct (this streamID might // already exist in the dep tree, but be closed), but // close enough for now. return } st.weight = priority.Weight parent := streams[priority.StreamDep] // might be nil if parent == st { // if client tries to set this stream to be the parent of itself // ignore and keep going return } // section 5.3.3: If a stream is made dependent on one of its // own dependencies, the formerly dependent stream is first // moved to be dependent on the reprioritized stream's previous // parent. The moved dependency retains its weight. 
for piter := parent; piter != nil; piter = piter.parent { if piter == st { parent.parent = st.parent break } } st.parent = parent if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { for _, openStream := range streams { if openStream != st && openStream.parent == st.parent { openStream.parent = st } } } } // resetPendingRequest zeros out all state related to a HEADERS frame // and its zero or more CONTINUATION frames sent to start a new // request. func (sc *serverConn) resetPendingRequest() { sc.serveG.check() sc.req = requestParam{} } func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) { sc.serveG.check() rp := &sc.req if rp.invalidHeader || rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected // MUST be treated as a stream error (Section 5.4.2) // of type PROTOCOL_ERROR." // // 8.1.2.3 Request Pseudo-Header Fields // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} } var tlsState *tls.ConnectionState // nil if not scheme https if rp.scheme == "https" { tlsState = sc.tlsState } authority := rp.authority if authority == "" { authority = rp.header.Get("Host") } needsContinue := rp.header.Get("Expect") == "100-continue" if needsContinue { rp.header.Del("Expect") } bodyOpen := rp.stream.state == stateOpen body := &requestBody{ conn: sc, stream: rp.stream, needsContinue: needsContinue, } // TODO: handle asterisk '*' requests + test url, err := url.ParseRequestURI(rp.path) if err != nil { // TODO: find the right error code? return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} } req := &http.Request{ Method: rp.method, URL: url, RemoteAddr: sc.remoteAddrStr, Header: rp.header, RequestURI: rp.path, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, Host: authority, Body: body, } if bodyOpen { body.pipe = &pipe{ b: buffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX } body.pipe.c.L = &body.pipe.m if vv, ok := rp.header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 } } rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields rws.conn = sc rws.bw = bwSave rws.bw.Reset(chunkWriter{rws}) rws.stream = rp.stream rws.req = req rws.body = body rws.frameWriteCh = make(chan error, 1) rw := &responseWriter{rws: rws} return rw, req, nil } // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request) { defer rw.handlerDone() // TODO: catch panics like net/http.Server sc.handler.ServeHTTP(rw, req) } // called from handler goroutines. // h may be nil. func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders, tempCh chan error) { sc.serveG.checkNotOn() // NOT on var errc chan error if headerData.h != nil { // If there's a header map (which we don't own), so we have to block on // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. errc = tempCh } sc.writeFrameFromHandler(frameWriteMsg{ write: headerData, stream: st, done: errc, }) if errc != nil { select { case <-errc: // Ignore. Just for synchronization. // Any error will be handled in the writing goroutine. 
case <-sc.doneServing: // Client has closed the connection. } } } // called from handler goroutines. func (sc *serverConn) write100ContinueHeaders(st *stream) { sc.writeFrameFromHandler(frameWriteMsg{ write: write100ContinueHeadersFrame{st.id}, stream: st, }) } // A bodyReadMsg tells the server loop that the http.Handler read n // bytes of the DATA from the client on the given stream. type bodyReadMsg struct { st *stream n int } // called from handler goroutines. // Notes that the handler for the given stream ID read n bytes of its body // and schedules flow control tokens to be sent. func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { sc.serveG.checkNotOn() // NOT on sc.bodyReadCh <- bodyReadMsg{st, n} } func (sc *serverConn) noteBodyRead(st *stream, n int) { sc.serveG.check() sc.sendWindowUpdate(nil, n) // conn-level if st.state != stateHalfClosedRemote && st.state != stateClosed { // Don't send this WINDOW_UPDATE if the stream is closed // remotely. sc.sendWindowUpdate(st, n) } } // st may be nil for conn-level func (sc *serverConn) sendWindowUpdate(st *stream, n int) { sc.serveG.check() // "The legal range for the increment to the flow control // window is 1 to 2^31-1 (2,147,483,647) octets." // A Go Read call on 64-bit machines could in theory read // a larger Read than this. Very unlikely, but we handle it here // rather than elsewhere for now. const maxUint31 = 1<<31 - 1 for n >= maxUint31 { sc.sendWindowUpdate32(st, maxUint31) n -= maxUint31 } sc.sendWindowUpdate32(st, int32(n)) } // st may be nil for conn-level func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { sc.serveG.check() if n == 0 { return } if n < 0 { panic("negative update") } var streamID uint32 if st != nil { streamID = st.id } sc.writeFrame(frameWriteMsg{ write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, stream: st, }) var ok bool if st == nil { ok = sc.inflow.add(n) } else { ok = st.inflow.add(n) } if !ok { panic("internal error; sent too many window updates without decrements?") } } type requestBody struct { stream *stream conn *serverConn closed bool pipe *pipe // non-nil if we have a HTTP entity message body needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { if b.pipe != nil { b.pipe.Close(errClosedBody) } b.closed = true return nil } func (b *requestBody) Read(p []byte) (n int, err error) { if b.needsContinue { b.needsContinue = false b.conn.write100ContinueHeaders(b.stream) } if b.pipe == nil { return 0, io.EOF } n, err = b.pipe.Read(p) if n > 0 { b.conn.noteBodyReadFromHandler(b.stream, n) } return } // responseWriter is the http.ResponseWriter implementation. It's // intentionally small (1 pointer wide) to minimize garbage. The // responseWriterState pointer inside is zeroed at the end of a // request (in handlerDone) and calls on the responseWriter thereafter // simply crash (caller's mistake), but the much larger responseWriterState // and buffers are reused between multiple requests. type responseWriter struct { rws *responseWriterState } // Optional http.ResponseWriter interfaces implemented. 
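// Handler-side sketch (hedged; w is the http.ResponseWriter passed to an
// http.Handler served over this connection):
//
//	if f, ok := w.(http.Flusher); ok {
//		f.Flush() // push buffered response data (and headers) to the client now
//	}
//	if cn, ok := w.(http.CloseNotifier); ok {
//		go func() {
//			<-cn.CloseNotify() // fires when the peer goes away; cancel work
//		}()
//	}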
var ( _ http.CloseNotifier = (*responseWriter)(nil) _ http.Flusher = (*responseWriter)(nil) _ stringWriter = (*responseWriter)(nil) ) type responseWriterState struct { // immutable within a request: stream *stream req *http.Request body *requestBody // to close at end of request, if DATA frames didn't conn *serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} // mutated by http.Handler goroutine: handlerHeader http.Header // nil until called snapHeader http.Header // snapshot of handlerHeader at WriteHeader time status int // status code passed to WriteHeader wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished curWrite writeData frameWriteCh chan error // re-used whenever we need to block on a frame being written closeNotifierMu sync.Mutex // guards closeNotifierCh closeNotifierCh chan bool // nil until first used } type chunkWriter struct{ rws *responseWriterState } func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } // writeChunk writes chunks from the bufio.Writer. But because // bufio.Writer may bypass its chunking, sometimes p may be // arbitrarily large. // // writeChunk is also responsible (on the first chunk) for sending the // HEADER response. func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if !rws.wroteHeader { rws.writeHeader(200) } if !rws.sentHeader { rws.sentHeader = true var ctype, clen string // implicit ones, if we can calculate it if rws.handlerDone && rws.snapHeader.Get("Content-Length") == "" { clen = strconv.Itoa(len(p)) } if rws.snapHeader.Get("Content-Type") == "" { ctype = http.DetectContentType(p) } endStream := rws.handlerDone && len(p) == 0 rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: rws.status, h: rws.snapHeader, endStream: endStream, contentType: ctype, contentLength: clen, }, rws.frameWriteCh) if endStream { return 0, nil } } if len(p) == 0 && !rws.handlerDone { return 0, nil } curWrite := &rws.curWrite curWrite.streamID = rws.stream.id curWrite.p = p curWrite.endStream = rws.handlerDone if err := rws.conn.writeDataFromHandler(rws.stream, curWrite, rws.frameWriteCh); err != nil { return 0, err } return len(p), nil } func (w *responseWriter) Flush() { rws := w.rws if rws == nil { panic("Header called after Handler finished") } if rws.bw.Buffered() > 0 { if err := rws.bw.Flush(); err != nil { // Ignore the error. The frame writer already knows. return } } else { // The bufio.Writer won't call chunkWriter.Write // (writeChunk with zero bytes, so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. 
rws.writeChunk(nil) } } func (w *responseWriter) CloseNotify() <-chan bool { rws := w.rws if rws == nil { panic("CloseNotify called after Handler finished") } rws.closeNotifierMu.Lock() ch := rws.closeNotifierCh if ch == nil { ch = make(chan bool, 1) rws.closeNotifierCh = ch go func() { rws.stream.cw.Wait() // wait for close ch <- true }() } rws.closeNotifierMu.Unlock() return ch } func (w *responseWriter) Header() http.Header { rws := w.rws if rws == nil { panic("Header called after Handler finished") } if rws.handlerHeader == nil { rws.handlerHeader = make(http.Header) } return rws.handlerHeader } func (w *responseWriter) WriteHeader(code int) { rws := w.rws if rws == nil { panic("WriteHeader called after Handler finished") } rws.writeHeader(code) } func (rws *responseWriterState) writeHeader(code int) { if !rws.wroteHeader { rws.wroteHeader = true rws.status = code if len(rws.handlerHeader) > 0 { rws.snapHeader = cloneHeader(rws.handlerHeader) } } } func cloneHeader(h http.Header) http.Header { h2 := make(http.Header, len(h)) for k, vv := range h { vv2 := make([]string, len(vv)) copy(vv2, vv) h2[k] = vv2 } return h2 } // The Life Of A Write is like this: // // * Handler calls w.Write or w.WriteString -> // * -> rws.bw (*bufio.Writer) -> // * (Handler migth call Flush) // * -> chunkWriter{rws} // * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk (most of the magic; see comment there) func (w *responseWriter) Write(p []byte) (n int, err error) { return w.write(len(p), p, "") } func (w *responseWriter) WriteString(s string) (n int, err error) { return w.write(len(s), nil, s) } // either dataB or dataS is non-zero. func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { rws := w.rws if rws == nil { panic("Write called after Handler finished") } if !rws.wroteHeader { w.WriteHeader(200) } if dataB != nil { return rws.bw.Write(dataB) } else { return rws.bw.WriteString(dataS) } } func (w *responseWriter) handlerDone() { rws := w.rws if rws == nil { panic("handlerDone called twice") } rws.handlerDone = true w.Flush() w.rws = nil responseWriterStatePool.Put(rws) } ================================================ FILE: vendor/github.com/bradfitz/http2/transport.go ================================================ // Copyright 2015 The Go Authors. 
// See https://go.googlesource.com/go/+/master/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://go.googlesource.com/go/+/master/LICENSE package http2 import ( "bufio" "bytes" "crypto/tls" "errors" "fmt" "io" "log" "net" "net/http" "strconv" "strings" "sync" "github.com/bradfitz/http2/hpack" ) type Transport struct { Fallback http.RoundTripper // TODO: remove this and make more general with a TLS dial hook, like http InsecureTLSDial bool connMu sync.Mutex conns map[string][]*clientConn // key is host:port } type clientConn struct { t *Transport tconn *tls.Conn tlsState *tls.ConnectionState connKey []string // key(s) this connection is cached in, in t.conns readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed hdec *hpack.Decoder nextRes *http.Response mu sync.Mutex closed bool goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received streams map[uint32]*clientStream nextStreamID uint32 bw *bufio.Writer werr error // first write error that has occurred br *bufio.Reader fr *Framer // Settings from peer: maxFrameSize uint32 maxConcurrentStreams uint32 initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder } type clientStream struct { ID uint32 resc chan resAndError pw *io.PipeWriter pr *io.PipeReader } type stickyErrWriter struct { w io.Writer err *error } func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } n, err = sew.w.Write(p) *sew.err = err return } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { if req.URL.Scheme != "https" { if t.Fallback == nil { return nil, errors.New("http2: unsupported scheme and no Fallback") } return t.Fallback.RoundTrip(req) } host, port, err := net.SplitHostPort(req.URL.Host) if err != nil { host = req.URL.Host port = "443" } for { cc, err := t.getClientConn(host, port) if err != nil { return nil, err } res, err := cc.roundTrip(req) if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)? continue } if err != nil { return nil, err } return res, nil } } // CloseIdleConnections closes any connections which were previously // connected from previous requests but are now sitting idle. // It does not interrupt any connections currently in use. 
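// Client usage sketch (hedged; the URL is a placeholder). Non-HTTPS requests
// are routed to Fallback, so leave it nil only for pure HTTP/2-over-TLS use:
//
//	c := &http.Client{Transport: &http2.Transport{}}
//	res, err := c.Get("https://http2.example.com/")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()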
func (t *Transport) CloseIdleConnections() { t.connMu.Lock() defer t.connMu.Unlock() for _, vv := range t.conns { for _, cc := range vv { cc.closeIfIdle() } } } var errClientConnClosed = errors.New("http2: client conn is closed") func shouldRetryRequest(err error) bool { // TODO: or GOAWAY graceful shutdown stuff return err == errClientConnClosed } func (t *Transport) removeClientConn(cc *clientConn) { t.connMu.Lock() defer t.connMu.Unlock() for _, key := range cc.connKey { vv, ok := t.conns[key] if !ok { continue } newList := filterOutClientConn(vv, cc) if len(newList) > 0 { t.conns[key] = newList } else { delete(t.conns, key) } } } func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn { out := in[:0] for _, v := range in { if v != exclude { out = append(out, v) } } return out } func (t *Transport) getClientConn(host, port string) (*clientConn, error) { t.connMu.Lock() defer t.connMu.Unlock() key := net.JoinHostPort(host, port) for _, cc := range t.conns[key] { if cc.canTakeNewRequest() { return cc, nil } } if t.conns == nil { t.conns = make(map[string][]*clientConn) } cc, err := t.newClientConn(host, port, key) if err != nil { return nil, err } t.conns[key] = append(t.conns[key], cc) return cc, nil } func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) { cfg := &tls.Config{ ServerName: host, NextProtos: []string{NextProtoTLS}, InsecureSkipVerify: t.InsecureTLSDial, } tconn, err := tls.Dial("tcp", host+":"+port, cfg) if err != nil { return nil, err } if err := tconn.Handshake(); err != nil { return nil, err } if !t.InsecureTLSDial { if err := tconn.VerifyHostname(cfg.ServerName); err != nil { return nil, err } } state := tconn.ConnectionState() if p := state.NegotiatedProtocol; p != NextProtoTLS { // TODO(bradfitz): fall back to Fallback return nil, fmt.Errorf("bad protocol: %v", p) } if !state.NegotiatedProtocolIsMutual { return nil, errors.New("could not negotiate protocol mutually") } if _, err := tconn.Write(clientPreface); err != nil { return nil, err } cc := &clientConn{ t: t, tconn: tconn, connKey: []string{key}, // TODO: cert's validated hostnames too tlsState: &state, readerDone: make(chan struct{}), nextStreamID: 1, maxFrameSize: 16 << 10, // spec default initialWindowSize: 65535, // spec default maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. streams: make(map[uint32]*clientStream), } cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr}) cc.br = bufio.NewReader(tconn) cc.fr = NewFramer(cc.bw, cc.br) cc.henc = hpack.NewEncoder(&cc.hbuf) cc.fr.WriteSettings() // TODO: re-send more conn-level flow control tokens when server uses all these. cc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs? 
cc.bw.Flush() if cc.werr != nil { return nil, cc.werr } // Read the obligatory SETTINGS frame f, err := cc.fr.ReadFrame() if err != nil { return nil, err } sf, ok := f.(*SettingsFrame) if !ok { return nil, fmt.Errorf("expected settings frame, got: %T", f) } cc.fr.WriteSettingsAck() cc.bw.Flush() sf.ForeachSetting(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val case SettingInitialWindowSize: cc.initialWindowSize = s.Val default: // TODO(bradfitz): handle more log.Printf("Unhandled Setting: %v", s) } return nil }) // TODO: figure out henc size cc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField) go cc.readLoop() return cc, nil } func (cc *clientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() cc.goAway = f } func (cc *clientConn) canTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() return cc.goAway == nil && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && cc.nextStreamID < 2147483647 } func (cc *clientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 { cc.mu.Unlock() return } cc.closed = true // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() cc.tconn.Close() } func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) { cc.mu.Lock() if cc.closed { cc.mu.Unlock() return nil, errClientConnClosed } cs := cc.newStream() hasBody := false // TODO // we send: HEADERS[+CONTINUATION] + (DATA?) hdrs := cc.encodeHeaders(req) first := true for len(hdrs) > 0 { chunk := hdrs if len(chunk) > int(cc.maxFrameSize) { chunk = chunk[:cc.maxFrameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 if first { cc.fr.WriteHeaders(HeadersFrameParam{ StreamID: cs.ID, BlockFragment: chunk, EndStream: !hasBody, EndHeaders: endHeaders, }) first = false } else { cc.fr.WriteContinuation(cs.ID, endHeaders, chunk) } } cc.bw.Flush() werr := cc.werr cc.mu.Unlock() if hasBody { // TODO: write data. and it should probably be interleaved: // go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... etc } if werr != nil { return nil, werr } re := <-cs.resc if re.err != nil { return nil, re.err } res := re.res res.Request = req res.TLS = cc.tlsState return res, nil } // requires cc.mu be held. func (cc *clientConn) encodeHeaders(req *http.Request) []byte { cc.hbuf.Reset() // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go host := req.Host if host == "" { host = req.URL.Host } path := req.URL.Path if path == "" { path = "/" } cc.writeHeader(":authority", host) // probably not right for all sites cc.writeHeader(":method", req.Method) cc.writeHeader(":path", path) cc.writeHeader(":scheme", "https") for k, vv := range req.Header { lowKey := strings.ToLower(k) if lowKey == "host" { continue } for _, v := range vv { cc.writeHeader(lowKey, v) } } return cc.hbuf.Bytes() } func (cc *clientConn) writeHeader(name, value string) { log.Printf("sending %q = %q", name, value) cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) } type resAndError struct { res *http.Response err error } // requires cc.mu be held. 
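// ID-allocation sketch (hedged; on a fresh connection, with cc.mu held as
// noted above):
//
//	cs1 := cc.newStream() // ID 1
//	cs2 := cc.newStream() // ID 3 — client-initiated streams use odd IDs;
//	                      // even IDs are reserved for server push.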
func (cc *clientConn) newStream() *clientStream { cs := &clientStream{ ID: cc.nextStreamID, resc: make(chan resAndError, 1), } cc.nextStreamID += 2 cc.streams[cs.ID] = cs return cs } func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.mu.Lock() defer cc.mu.Unlock() cs := cc.streams[id] if andRemove { delete(cc.streams, id) } return cs } // runs in its own goroutine. func (cc *clientConn) readLoop() { defer cc.t.removeClientConn(cc) defer close(cc.readerDone) activeRes := map[uint32]*clientStream{} // keyed by streamID // Close any response bodies if the server closes prematurely. // TODO: also do this if we've written the headers but not // gotten a response yet. defer func() { err := cc.readerErr if err == io.EOF { err = io.ErrUnexpectedEOF } for _, cs := range activeRes { cs.pw.CloseWithError(err) } }() // continueStreamID is the stream ID we're waiting for // continuation frames for. var continueStreamID uint32 for { f, err := cc.fr.ReadFrame() if err != nil { cc.readerErr = err return } log.Printf("Transport received %v: %#v", f.Header(), f) streamID := f.Header().StreamID _, isContinue := f.(*ContinuationFrame) if isContinue { if streamID != continueStreamID { log.Printf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, continueStreamID) cc.readerErr = ConnectionError(ErrCodeProtocol) return } } else if continueStreamID != 0 { // Continue frames need to be adjacent in the stream // and we were in the middle of headers. log.Printf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, continueStreamID) cc.readerErr = ConnectionError(ErrCodeProtocol) return } if streamID%2 == 0 { // Ignore streams pushed from the server for now. // These always have an even stream id. continue } streamEnded := false if ff, ok := f.(streamEnder); ok { streamEnded = ff.StreamEnded() } cs := cc.streamByID(streamID, streamEnded) if cs == nil { log.Printf("Received frame for untracked stream ID %d", streamID) continue } switch f := f.(type) { case *HeadersFrame: cc.nextRes = &http.Response{ Proto: "HTTP/2.0", ProtoMajor: 2, Header: make(http.Header), } cs.pr, cs.pw = io.Pipe() cc.hdec.Write(f.HeaderBlockFragment()) case *ContinuationFrame: cc.hdec.Write(f.HeaderBlockFragment()) case *DataFrame: log.Printf("DATA: %q", f.Data()) cs.pw.Write(f.Data()) case *GoAwayFrame: cc.t.removeClientConn(cc) if f.ErrCode != 0 { // TODO: deal with GOAWAY more. 
particularly the error code log.Printf("transport got GOAWAY with error code = %v", f.ErrCode) } cc.setGoAway(f) default: log.Printf("Transport: unhandled response frame type %T", f) } headersEnded := false if he, ok := f.(headersEnder); ok { headersEnded = he.HeadersEnded() if headersEnded { continueStreamID = 0 } else { continueStreamID = streamID } } if streamEnded { cs.pw.Close() delete(activeRes, streamID) } if headersEnded { if cs == nil { panic("couldn't find stream") // TODO be graceful } // TODO: set the Body to one which notes the // Close and also sends the server a // RST_STREAM cc.nextRes.Body = cs.pr res := cc.nextRes activeRes[streamID] = cs cs.resc <- resAndError{res: res} } } } func (cc *clientConn) onNewHeaderField(f hpack.HeaderField) { // TODO: verify pseudo headers come before non-pseudo headers // TODO: verify the status is set log.Printf("Header field: %+v", f) if f.Name == ":status" { code, err := strconv.Atoi(f.Value) if err != nil { panic("TODO: be graceful") } cc.nextRes.Status = f.Value + " " + http.StatusText(code) cc.nextRes.StatusCode = code return } if strings.HasPrefix(f.Name, ":") { // "Endpoints MUST NOT generate pseudo-header fields other than those defined in this document." // TODO: treat as invalid? return } cc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value) } ================================================ FILE: vendor/github.com/bradfitz/http2/write.go ================================================ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package http2 import ( "bytes" "fmt" "net/http" "time" "github.com/bradfitz/http2/hpack" ) // writeFramer is implemented by any type that is used to write frames. type writeFramer interface { writeFrame(writeContext) error } // writeContext is the interface needed by the various frame writer // types below. All the writeFrame methods below are scheduled via the // frame writing scheduler (see writeScheduler in writesched.go). // // This interface is implemented by *serverConn. // TODO: use it from the client code too, once it exists. type writeContext interface { Framer() *Framer Flush() error CloseConn() error // HeaderEncoder returns an HPACK encoder that writes to the // returned buffer. HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) } // endsStream reports whether the given frame writer w will locally // close the stream. func endsStream(w writeFramer) bool { switch v := w.(type) { case *writeData: return v.endStream case *writeResHeaders: return v.endStream } return false } type flushFrameWriter struct{} func (flushFrameWriter) writeFrame(ctx writeContext) error { return ctx.Flush() } type writeSettings []Setting func (s writeSettings) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettings([]Setting(s)...)
} type writeGoAway struct { maxStreamID uint32 code ErrCode } func (p *writeGoAway) writeFrame(ctx writeContext) error { err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) if p.code != 0 { ctx.Flush() // ignore error: we're hanging up on them anyway time.Sleep(50 * time.Millisecond) ctx.CloseConn() } return err } type writeData struct { streamID uint32 p []byte endStream bool } func (w *writeData) String() string { return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) } func (w *writeData) writeFrame(ctx writeContext) error { return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) } func (se StreamError) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) } type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { return ctx.Framer().WritePing(true, w.pf.Data) } type writeSettingsAck struct{} func (writeSettingsAck) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettingsAck() } // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames // for HTTP response headers from a server handler. type writeResHeaders struct { streamID uint32 httpResCode int h http.Header // may be nil endStream bool contentType string contentLength string } func (w *writeResHeaders) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)}) for k, vv := range w.h { k = lowerHeader(k) for _, v := range vv { // TODO: more of "8.1.2.2 Connection-Specific Header Fields" if k == "transfer-encoding" && v != "trailers" { continue } enc.WriteField(hpack.HeaderField{Name: k, Value: v}) } } if w.contentType != "" { enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType}) } if w.contentLength != "" { enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength}) } headerBlock := buf.Bytes() if len(headerBlock) == 0 { panic("unexpected empty hpack") } // For now we're lazy and just pick the minimum MAX_FRAME_SIZE // that all peers must support (16KB). Later we could care // more and send larger frames if the peer advertised it, but // there's little point. Most headers are small anyway (so we // generally won't have CONTINUATION frames), and extra frames // only waste 9 bytes anyway. 
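// Split the encoded header block into one HEADERS frame followed by CONTINUATION frames, each carrying at most maxFrameSize bytes of the block.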
const maxFrameSize = 16384 first := true for len(headerBlock) > 0 { frag := headerBlock if len(frag) > maxFrameSize { frag = frag[:maxFrameSize] } headerBlock = headerBlock[len(frag):] endHeaders := len(headerBlock) == 0 var err error if first { first = false err = ctx.Framer().WriteHeaders(HeadersFrameParam{ StreamID: w.streamID, BlockFragment: frag, EndStream: w.endStream, EndHeaders: endHeaders, }) } else { err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) } if err != nil { return err } } return nil } type write100ContinueHeadersFrame struct { streamID uint32 } func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) return ctx.Framer().WriteHeaders(HeadersFrameParam{ StreamID: w.streamID, BlockFragment: buf.Bytes(), EndStream: false, EndHeaders: true, }) } type writeWindowUpdate struct { streamID uint32 // or 0 for conn-level n uint32 } func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) } ================================================ FILE: vendor/github.com/bradfitz/http2/writesched.go ================================================ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // See https://code.google.com/p/go/source/browse/CONTRIBUTORS // Licensed under the same terms as Go itself: // https://code.google.com/p/go/source/browse/LICENSE package http2 import "fmt" // frameWriteMsg is a request to write a frame. type frameWriteMsg struct { // write is the interface value that does the writing, once the // writeScheduler (below) has decided to select this frame // to write. The write functions are all defined in write.go. write writeFramer stream *stream // used for prioritization. nil for non-stream frames. // done, if non-nil, must be a buffered channel with space for // 1 message and is sent the return value from write (or an // earlier error) when the frame has been written. done chan error } // for debugging only: func (wm frameWriteMsg) String() string { var streamID uint32 if wm.stream != nil { streamID = wm.stream.id } var des string if s, ok := wm.write.(fmt.Stringer); ok { des = s.String() } else { des = fmt.Sprintf("%T", wm.write) } return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) } // writeScheduler tracks pending frames to write, priorities, and decides // the next one to use. It is not thread-safe. type writeScheduler struct { // zero are frames not associated with a specific stream. // They're sent before any stream-specific frames. zero writeQueue // maxFrameSize is the maximum size of a DATA frame // we'll write. Must be non-zero and between 16K-16M. maxFrameSize uint32 // sq contains the stream-specific queues, keyed by stream ID. // when a stream is idle, it's deleted from the map. sq map[uint32]*writeQueue // canSend is a slice of memory that's reused between frame // scheduling decisions to hold the list of writeQueues (from sq) // which have enough flow control data to send. After canSend is // built, the best is selected. canSend []*writeQueue // pool of empty queues for reuse.
queuePool []*writeQueue } func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { if len(q.s) != 0 { panic("queue must be empty") } ws.queuePool = append(ws.queuePool, q) } func (ws *writeScheduler) getEmptyQueue() *writeQueue { ln := len(ws.queuePool) if ln == 0 { return new(writeQueue) } q := ws.queuePool[ln-1] ws.queuePool = ws.queuePool[:ln-1] return q } func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } func (ws *writeScheduler) add(wm frameWriteMsg) { st := wm.stream if st == nil { ws.zero.push(wm) } else { ws.streamQueue(st.id).push(wm) } } func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { if q, ok := ws.sq[streamID]; ok { return q } if ws.sq == nil { ws.sq = make(map[uint32]*writeQueue) } q := ws.getEmptyQueue() ws.sq[streamID] = q return q } // take returns the most important frame to write and removes it from the scheduler. // It is illegal to call this if the scheduler is empty or if there are no connection-level // flow control bytes available. func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { if ws.maxFrameSize == 0 { panic("internal error: ws.maxFrameSize not initialized or invalid") } // If there are any frames not associated with streams, prefer those first. // These are usually SETTINGS, etc. if !ws.zero.empty() { return ws.zero.shift(), true } if len(ws.sq) == 0 { return } // Next, prioritize frames on streams that aren't DATA frames (no cost). for id, q := range ws.sq { if q.firstIsNoCost() { return ws.takeFrom(id, q) } } // Now, all that remains are DATA frames with non-zero bytes to // send. So pick the best one. if len(ws.canSend) != 0 { panic("should be empty") } for _, q := range ws.sq { if n := ws.streamWritableBytes(q); n > 0 { ws.canSend = append(ws.canSend, q) } } if len(ws.canSend) == 0 { return } defer ws.zeroCanSend() // TODO: find the best queue q := ws.canSend[0] return ws.takeFrom(q.streamID(), q) } // zeroCanSend is deferred from take. func (ws *writeScheduler) zeroCanSend() { for i := range ws.canSend { ws.canSend[i] = nil } ws.canSend = ws.canSend[:0] } // streamWritableBytes returns the number of DATA bytes we could write // from the given queue's stream, if this stream/queue were // selected. It is an error to call this if q's head isn't a // *writeData. func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { wm := q.head() ret := wm.stream.flow.available() // max we can write if ret == 0 { return 0 } if int32(ws.maxFrameSize) < ret { ret = int32(ws.maxFrameSize) } if ret == 0 { panic("internal error: ws.maxFrameSize not initialized or invalid") } wd := wm.write.(*writeData) if len(wd.p) < int(ret) { ret = int32(len(wd.p)) } return ret } func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { wm = q.head() // If the first item in this queue costs flow control tokens // and we don't have enough, write as much as we can. if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { allowed := wm.stream.flow.available() // max we can write if allowed == 0 { // No quota available. Caller can try the next stream. return frameWriteMsg{}, false } if int32(ws.maxFrameSize) < allowed { allowed = int32(ws.maxFrameSize) } // TODO: further restrict the allowed size, because even if // the peer says it's okay to write 16MB data frames, we might // want to write smaller ones to properly weight competing // streams' priorities.
if len(wd.p) > int(allowed) { wm.stream.flow.take(allowed) chunk := wd.p[:allowed] wd.p = wd.p[allowed:] // Make up a new write message of a valid size, rather // than shifting one off the queue. return frameWriteMsg{ stream: wm.stream, write: &writeData{ streamID: wd.streamID, p: chunk, // even if the original had endStream set, there // are bytes remaining because len(wd.p) > allowed, // so we know endStream is false: endStream: false, }, // our caller is blocking on the final DATA frame, not // these intermediates, so no need to wait: done: nil, }, true } wm.stream.flow.take(int32(len(wd.p))) } q.shift() if q.empty() { ws.putEmptyQueue(q) delete(ws.sq, id) } return wm, true } func (ws *writeScheduler) forgetStream(id uint32) { q, ok := ws.sq[id] if !ok { return } delete(ws.sq, id) // But keep it for others later. for i := range q.s { q.s[i] = frameWriteMsg{} } q.s = q.s[:0] ws.putEmptyQueue(q) } type writeQueue struct { s []frameWriteMsg } // streamID returns the stream ID for a non-empty stream-specific queue. func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } func (q *writeQueue) empty() bool { return len(q.s) == 0 } func (q *writeQueue) push(wm frameWriteMsg) { q.s = append(q.s, wm) } // head returns the next item that would be removed by shift. func (q *writeQueue) head() frameWriteMsg { if len(q.s) == 0 { panic("invalid use of queue") } return q.s[0] } func (q *writeQueue) shift() frameWriteMsg { if len(q.s) == 0 { panic("invalid use of queue") } wm := q.s[0] // TODO: less copy-happy queue. copy(q.s, q.s[1:]) q.s[len(q.s)-1] = frameWriteMsg{} q.s = q.s[:len(q.s)-1] return wm } func (q *writeQueue) firstIsNoCost() bool { if df, ok := q.s[0].write.(*writeData); ok { return len(df.p) == 0 } return true } ================================================ FILE: vendor/github.com/golang/glog/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: vendor/github.com/golang/glog/README ================================================ glog ==== Leveled execution logs for Go. This is an efficient pure Go implementation of leveled logs in the manner of the open source C++ package http://code.google.com/p/google-glog By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the -vmodule flag, the package also provides fine-grained control over logging at the file level. The comment from glog.go introduces the ideas: Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
Basic examples: glog.Info("Prepare to repel boarders") glog.Fatalf("Initialization failed: %s", err) See the documentation for the V function for an explanation of these examples: if glog.V(2) { glog.Info("Starting transaction...") } glog.V(2).Infoln("Processed", nItems, "elements") The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored. Send bug reports to golang-nuts@googlegroups.com. ================================================ FILE: vendor/github.com/golang/glog/glog.go ================================================ // Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ // // Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. // It provides functions Info, Warning, Error, Fatal, plus formatting variants such as // Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. // // Basic examples: // // glog.Info("Prepare to repel boarders") // // glog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // // if glog.V(2) { // glog.Info("Starting transaction...") // } // // glog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. // // By default, all log statements write to files in a temporary directory. // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // // -logtostderr=false // Logs are written to standard error instead of to files. // -alsologtostderr=false // Logs are written to standard error as well as to files. // -stderrthreshold=ERROR // Log events at or above this severity are logged to standard // error as well as to files. // -log_dir="" // Log files will be written to this directory instead of the // default temporary directory. // // Other flags provide aids to debugging. // // -log_backtrace_at="" // When set to a file and line number holding a logging statement, // such as // -log_backtrace_at=gopherflakes.go:234 // a stack trace will be written to the Info log whenever execution // hits that statement. (Unlike with -vmodule, the ".go" must be // present.) // -v=0 // Enable V-leveled logging at the specified level. // -vmodule="" // The syntax of the argument is a comma-separated list of pattern=N, // where pattern is a literal file name (minus the ".go" suffix) or // "glob" pattern and N is a V level. For instance, // -vmodule=gopher*=3 // sets the V level to 3 in all Go files whose names begin "gopher". 
// package glog import ( "bufio" "bytes" "errors" "flag" "fmt" "io" stdLog "log" "os" "path/filepath" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" ) // severity identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. The -stderrthreshold flag is of type severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. type severity int32 // sync/atomic int32 // These constants identify the log levels in order of increasing severity. // A message written to a high-severity log file is also written to each // lower-severity log file. const ( infoLog severity = iota warningLog errorLog fatalLog numSeverity = 4 ) const severityChar = "IWEF" var severityName = []string{ infoLog: "INFO", warningLog: "WARNING", errorLog: "ERROR", fatalLog: "FATAL", } // get returns the value of the severity. func (s *severity) get() severity { return severity(atomic.LoadInt32((*int32)(s))) } // set sets the value of the severity. func (s *severity) set(val severity) { atomic.StoreInt32((*int32)(s), int32(val)) } // String is part of the flag.Value interface. func (s *severity) String() string { return strconv.FormatInt(int64(*s), 10) } // Get is part of the flag.Value interface. func (s *severity) Get() interface{} { return *s } // Set is part of the flag.Value interface. func (s *severity) Set(value string) error { var threshold severity // Is it a known name? if v, ok := severityByName(value); ok { threshold = v } else { v, err := strconv.Atoi(value) if err != nil { return err } threshold = severity(v) } logging.stderrThreshold.set(threshold) return nil } func severityByName(s string) (severity, bool) { s = strings.ToUpper(s) for i, name := range severityName { if name == s { return severity(i), true } } return 0, false } // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { lines int64 bytes int64 } // Lines returns the number of lines written. func (s *OutputStats) Lines() int64 { return atomic.LoadInt64(&s.lines) } // Bytes returns the number of bytes written. func (s *OutputStats) Bytes() int64 { return atomic.LoadInt64(&s.bytes) } // Stats tracks the number of lines of output and number of bytes // per severity level. Values must be read with atomic.LoadInt64. var Stats struct { Info, Warning, Error OutputStats } var severityStats = [numSeverity]*OutputStats{ infoLog: &Stats.Info, warningLog: &Stats.Warning, errorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is // the type of the v flag, which can be set programmatically. // It's a distinct type because we want to discriminate it from logType. // Variables of type level are only changed under logging.mu. // The -v flag is read only with atomic ops, so the state of the logging // module is consistent. // Level is treated as a sync/atomic int32. // Level specifies a level of verbosity for V logs. *Level implements // flag.Value; the -v flag is of type Level and should be modified // only through the flag.Value interface. type Level int32 // get returns the value of the Level. func (l *Level) get() Level { return Level(atomic.LoadInt32((*int32)(l))) } // set sets the value of the Level. func (l *Level) set(val Level) { atomic.StoreInt32((*int32)(l), int32(val)) } // String is part of the flag.Value interface. func (l *Level) String() string { return strconv.FormatInt(int64(*l), 10) } // Get is part of the flag.Value interface. 
func (l *Level) Get() interface{} { return *l } // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { v, err := strconv.Atoi(value) if err != nil { return err } logging.mu.Lock() defer logging.mu.Unlock() logging.setVState(Level(v), logging.vmodule.filter, false) return nil } // moduleSpec represents the setting of the -vmodule flag. type moduleSpec struct { filter []modulePat } // modulePat contains a filter for the -vmodule flag. // It holds a verbosity level and a file pattern to match. type modulePat struct { pattern string literal bool // The pattern is a literal string level Level } // match reports whether the file matches the pattern. It uses a string // comparison if the pattern contains no metacharacters. func (m *modulePat) match(file string) bool { if m.literal { return file == m.pattern } match, _ := filepath.Match(m.pattern, file) return match } func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() var b bytes.Buffer for i, f := range m.filter { if i > 0 { b.WriteRune(',') } fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) } return b.String() } // Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the // struct is not exported. func (m *moduleSpec) Get() interface{} { return nil } var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { // Empty strings such as from a trailing comma can be ignored. continue } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { return errVmoduleSyntax } pattern := patLev[0] v, err := strconv.Atoi(patLev[1]) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { return errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. } // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } logging.mu.Lock() defer logging.mu.Unlock() logging.setVState(logging.verbosity, filter, true) return nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters // that require filepath.Match to be called to match the pattern. func isLiteral(pattern string) bool { return !strings.ContainsAny(pattern, `\*?[]`) } // traceLocation represents the setting of the -log_backtrace_at flag. type traceLocation struct { file string line int } // isSet reports whether the trace location has been specified. // logging.mu is held. func (t *traceLocation) isSet() bool { return t.line > 0 } // match reports whether the specified file and line matches the trace location. // The argument file name is the full path, not the basename specified in the flag. // logging.mu is held. func (t *traceLocation) match(file string, line int) bool { if t.line != line { return false } if i := strings.LastIndex(file, "/"); i >= 0 { file = file[i+1:] } return t.file == file } func (t *traceLocation) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() return fmt.Sprintf("%s:%d", t.file, t.line) } // Get is part of the (Go 1.2) flag.Getter interface. 
It always returns nil for this flag type since the // struct is not exported. func (t *traceLocation) Get() interface{} { return nil } var errTraceSyntax = errors.New("syntax error: expect file.go:234") // Syntax: -log_backtrace_at=gopherflakes.go:234 // Note that unlike vmodule the file extension is included here. func (t *traceLocation) Set(value string) error { if value == "" { // Unset. t.line = 0 t.file = "" } fields := strings.Split(value, ":") if len(fields) != 2 { return errTraceSyntax } file, line := fields[0], fields[1] if !strings.Contains(file, ".") { return errTraceSyntax } v, err := strconv.Atoi(line) if err != nil { return errTraceSyntax } if v <= 0 { return errors.New("negative or zero value for level") } logging.mu.Lock() defer logging.mu.Unlock() t.line = v t.file = file return nil } // flushSyncWriter is the interface satisfied by logging destinations. type flushSyncWriter interface { Flush() error Sync() error io.Writer } func init() { flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") flag.Var(&logging.verbosity, "v", "log level for V logs") flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") // Default stderrThreshold is ERROR. logging.stderrThreshold = errorLog logging.setVState(0, nil, false) go logging.flushDaemon() } // Flush flushes all pending log I/O. func Flush() { logging.lockAndFlushAll() } // loggingT collects all the global state of the logging setup. type loggingT struct { // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. toStderr bool // The -logtostderr flag. alsoToStderr bool // The -alsologtostderr flag. // Level flag. Handled atomically. stderrThreshold severity // The -stderrthreshold flag. // freeList is a list of byte buffers, maintained under freeListMu. freeList *buffer // freeListMu maintains the free list. It is separate from the main mutex // so buffers can be grabbed and printed to without holding the main lock, // for better parallelization. freeListMu sync.Mutex // mu protects the remaining elements of this structure and is // used to synchronize logging. mu sync.Mutex // file holds writer for each of the log types. file [numSeverity]flushSyncWriter // pcs is used in V to avoid an allocation when computing the caller's PC. pcs [1]uintptr // vmap is a cache of the V Level for each V() call site, identified by PC. // It is wiped whenever the vmodule flag changes state. vmap map[uintptr]Level // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. It may be read safely // using atomic.LoadInt32, but is only modified under mu. filterLength int32 // traceLocation is the state of the -log_backtrace_at flag. traceLocation traceLocation // These flags are modified only under lock, although verbosity may be fetched // safely using atomic.LoadInt32. vmodule moduleSpec // The state of the -vmodule flag. verbosity Level // V logging level, the value of the -v flag. } // buffer holds a byte Buffer for reuse.
The zero value is ready for use. type buffer struct { bytes.Buffer tmp [64]byte // temporary byte array for creating headers. next *buffer } var logging loggingT // setVState sets a consistent state for V logging. // l.mu is held. func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { // Turn verbosity off so V will not fire while we are in transition. logging.verbosity.set(0) // Ditto for filter length. atomic.StoreInt32(&logging.filterLength, 0) // Set the new filters and wipe the pc->Level map if the filter has changed. if setFilter { logging.vmodule.filter = filter logging.vmap = make(map[uintptr]Level) } // Things are consistent now, so enable filtering and verbosity. // They are enabled in order opposite to that in V. atomic.StoreInt32(&logging.filterLength, int32(len(filter))) logging.verbosity.set(verbosity) } // getBuffer returns a new, ready-to-use buffer. func (l *loggingT) getBuffer() *buffer { l.freeListMu.Lock() b := l.freeList if b != nil { l.freeList = b.next } l.freeListMu.Unlock() if b == nil { b = new(buffer) } else { b.next = nil b.Reset() } return b } // putBuffer returns a buffer to the free list. func (l *loggingT) putBuffer(b *buffer) { if b.Len() >= 256 { // Let big buffers die a natural death. return } l.freeListMu.Lock() b.next = l.freeList l.freeList = b l.freeListMu.Unlock() } var timeNow = time.Now // Stubbed out for testing. /* header formats a log header as defined by the C++ implementation. It returns a buffer containing the formatted header and the user's file and line number. The depth specifies how many stack frames above lives the source line to be identified in the log message. Log lines have this form: Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... where the fields are defined as follows: L A single character, representing the log level (eg 'I' for INFO) mm The month (zero padded; ie May is '05') dd The day (zero padded) hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds threadid The space-padded thread ID as returned by GetTID() file The file name line The line number msg The user-supplied message */ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { _, file, line, ok := runtime.Caller(3 + depth) if !ok { file = "???" line = 1 } else { slash := strings.LastIndex(file, "/") if slash >= 0 { file = file[slash+1:] } } return l.formatHeader(s, file, line), file, line } // formatHeader formats a log header using the provided file name and line number. func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { now := timeNow() if line < 0 { line = 0 // not a real line number, but acceptable to someDigits } if s > fatalLog { s = infoLog // for safety. } buf := l.getBuffer() // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] buf.tmp[0] = severityChar[s] buf.twoDigits(1, int(month)) buf.twoDigits(3, day) buf.tmp[5] = ' ' buf.twoDigits(6, hour) buf.tmp[8] = ':' buf.twoDigits(9, minute) buf.tmp[11] = ':' buf.twoDigits(12, second) buf.tmp[14] = '.' 
buf.nDigits(6, 15, now.Nanosecond()/1000, '0') buf.tmp[21] = ' ' buf.nDigits(7, 22, pid, ' ') // TODO: should be TID buf.tmp[29] = ' ' buf.Write(buf.tmp[:30]) buf.WriteString(file) buf.tmp[0] = ':' n := buf.someDigits(1, line) buf.tmp[n+1] = ']' buf.tmp[n+2] = ' ' buf.Write(buf.tmp[:n+3]) return buf } // Some custom tiny helper functions to print the log header efficiently. const digits = "0123456789" // twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. func (buf *buffer) twoDigits(i, d int) { buf.tmp[i+1] = digits[d%10] d /= 10 buf.tmp[i] = digits[d%10] } // nDigits formats an n-digit integer at buf.tmp[i], // padding with pad on the left. // It assumes d >= 0. func (buf *buffer) nDigits(n, i, d int, pad byte) { j := n - 1 for ; j >= 0 && d > 0; j-- { buf.tmp[i+j] = digits[d%10] d /= 10 } for ; j >= 0; j-- { buf.tmp[i+j] = pad } } // someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. func (buf *buffer) someDigits(i, d int) int { // Print into the top, then copy down. We know there's space for at least // a 10-digit number. j := len(buf.tmp) for { j-- buf.tmp[j] = digits[d%10] d /= 10 if d == 0 { break } } return copy(buf.tmp[i:], buf.tmp[j:]) } func (l *loggingT) println(s severity, args ...interface{}) { buf, file, line := l.header(s, 0) fmt.Fprintln(buf, args...) l.output(s, buf, file, line, false) } func (l *loggingT) print(s severity, args ...interface{}) { l.printDepth(s, 1, args...) } func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, buf, file, line, false) } func (l *loggingT) printf(s severity, format string, args ...interface{}) { buf, file, line := l.header(s, 0) fmt.Fprintf(buf, format, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, buf, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, buf, file, line, alsoToStderr) } // output writes the data to the log files and releases the buffer. func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { l.mu.Lock() if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { buf.Write(stacks(false)) } } data := buf.Bytes() if l.toStderr { os.Stderr.Write(data) } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } if l.file[s] == nil { if err := l.createFiles(s); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. l.exit(err) } } switch s { case fatalLog: l.file[fatalLog].Write(data) fallthrough case errorLog: l.file[errorLog].Write(data) fallthrough case warningLog: l.file[warningLog].Write(data) fallthrough case infoLog: l.file[infoLog].Write(data) } } if s == fatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() timeoutFlush(10 * time.Second) os.Exit(1) } // Dump all goroutine stacks before exiting. 
// First, make sure we see the trace for the current goroutine on standard error. // If -logtostderr has been specified, the loop below will do that anyway // as the first stack in the full dump. if !l.toStderr { os.Stderr.Write(stacks(false)) } // Write the stack trace for all goroutines to the files. trace := stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := fatalLog; log >= infoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. f.Write(trace) } } l.mu.Unlock() timeoutFlush(10 * time.Second) os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } l.putBuffer(buf) l.mu.Unlock() if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) } } // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked // by Flush may deadlock when glog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) go func() { Flush() // calls logging.lockAndFlushAll() done <- true }() select { case <-done: case <-time.After(timeout): fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) } } // stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. func stacks(all bool) []byte { // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. n := 10000 if all { n = 100000 } var trace []byte for i := 0; i < 5; i++ { trace = make([]byte, n) nbytes := runtime.Stack(trace, all) if nbytes < len(trace) { return trace[:nbytes] } n *= 2 } return trace } // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that // would make its use clumsier. var logExitFunc func(error) // exit is called if there is trouble creating or writing log files. // It flushes the logs and exits the program; there's no point in hanging around. // l.mu is held. func (l *loggingT) exit(err error) { fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) // If logExitFunc is set, we do that instead of exiting. if logExitFunc != nil { logExitFunc(err) return } l.flushAll() os.Exit(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the // file's Sync method and providing a wrapper for the Write method that provides log // file rotation. There are conflicting methods, so the file cannot be embedded. // l.mu is held for all its methods. type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File sev severity nbytes uint64 // The number of bytes written to this file } func (sb *syncBuffer) Sync() error { return sb.file.Sync() } func (sb *syncBuffer) Write(p []byte) (n int, err error) { if sb.nbytes+uint64(len(p)) >= MaxSize { if err := sb.rotateFile(time.Now()); err != nil { sb.logger.exit(err) } } n, err = sb.Writer.Write(p) sb.nbytes += uint64(n) if err != nil { sb.logger.exit(err) } return } // rotateFile closes the syncBuffer's file and starts a new one. 
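// The new file begins with a header recording the creation time, host, binary build details, and the log line format.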
func (sb *syncBuffer) rotateFile(now time.Time) error { if sb.file != nil { sb.Flush() sb.file.Close() } var err error sb.file, _, err = create(severityName[sb.sev], now) sb.nbytes = 0 if err != nil { return err } sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) // Write header. var buf bytes.Buffer fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) fmt.Fprintf(&buf, "Running on machine: %s\n", host) fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") n, err := sb.file.Write(buf.Bytes()) sb.nbytes += uint64(n) return err } // bufferSize sizes the buffer associated with each log file. It's large // so that log records can accumulate without the logging thread blocking // on disk I/O. The flushDaemon will block instead. const bufferSize = 256 * 1024 // createFiles creates all the log files for severity from sev down to infoLog. // l.mu is held. func (l *loggingT) createFiles(sev severity) error { now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. for s := sev; s >= infoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, } if err := sb.rotateFile(now); err != nil { return err } l.file[s] = sb } return nil } const flushInterval = 30 * time.Second // flushDaemon periodically flushes the log file buffers. func (l *loggingT) flushDaemon() { for _ = range time.NewTicker(flushInterval).C { l.lockAndFlushAll() } } // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() l.flushAll() l.mu.Unlock() } // flushAll flushes all the logs and attempts to "sync" their data to disk. // l.mu is held. func (l *loggingT) flushAll() { // Flush from fatal down, in case there's trouble flushing. for s := fatalLog; s >= infoLog; s-- { file := l.file[s] if file != nil { file.Flush() // ignore error file.Sync() // ignore error } } } // CopyStandardLogTo arranges for messages written to the Go "log" package's // default logs to also appear in the Google logs for the named and lower // severities. Subsequent changes to the standard log's default output location // or format may break this behavior. // // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) } // Set a log format that captures the user's file and line: // d.go:23: message stdLog.SetFlags(stdLog.Lshortfile) stdLog.SetOutput(logBridge(sev)) } // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. type logBridge severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). func (lb logBridge) Write(b []byte) (n int, err error) { var ( file = "???" line = 1 text string ) // Split "d.go:23: message" into "d.go", "23", and "message". 
if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { text = fmt.Sprintf("bad log format: %s", b) } else { file = string(parts[0]) text = string(parts[2][1:]) // skip leading space line, err = strconv.Atoi(string(parts[1])) if err != nil { text = fmt.Sprintf("bad line number: %s", b) line = 1 } } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. logging.printWithFileLine(severity(lb), file, line, true, text) return len(b), nil } // setV computes and remembers the V level for a given PC // when vmodule is enabled. // File pattern matching takes the basename of the file, stripped // of its .go suffix, and uses filepath.Match, which is a little more // general than the *? matching used in C++. // l.mu is held. func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. if strings.HasSuffix(file, ".go") { file = file[:len(file)-3] } if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } for _, filter := range l.vmodule.filter { if filter.match(file) { l.vmap[pc] = filter.level return filter.level } } l.vmap[pc] = 0 return 0 } // Verbose is a boolean type that implements Infof (like Printf) etc. // See the documentation of V for more information. type Verbose bool // V reports whether verbosity at the call site is at least the requested level. // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either // if glog.V(2) { glog.Info("log this") } // or // glog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // // Whether an individual call to V generates a log record depends on the setting of // the -v and --vmodule flags; both are off by default. If the level in the call to // V is at least the value of -v, or of -vmodule for the source file containing the // call, the V call will log. func V(level Level) Verbose { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. // Here is a cheap but safe test to see if V logging is enabled globally. if logging.verbosity.get() >= level { return Verbose(true) } // It's off globally but vmodule may still be set. // Here is another cheap but safe test to see if vmodule is enabled. if atomic.LoadInt32(&logging.filterLength) > 0 { // Now we need a proper lock to use the logging structure. The pcs field // is shared so we must lock before accessing it. This is fairly expensive, // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() if runtime.Callers(2, logging.pcs[:]) == 0 { return Verbose(false) } v, ok := logging.vmap[logging.pcs[0]] if !ok { v = logging.setV(logging.pcs[0]) } return Verbose(v >= level) } return Verbose(false) } // Info is equivalent to the global Info function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v { logging.print(infoLog, args...) } } // Infoln is equivalent to the global Infoln function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v { logging.println(infoLog,
} } // Infof is equivalent to the global Infof function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v { logging.printf(infoLog, format, args...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { logging.print(infoLog, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { logging.printDepth(infoLog, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { logging.printf(infoLog, format, args...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { logging.print(warningLog, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { logging.printDepth(warningLog, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { logging.printf(warningLog, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { logging.print(errorLog, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { logging.printDepth(errorLog, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Errorln(args ...interface{}) { logging.println(errorLog, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { logging.printf(errorLog, format, args...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { logging.print(fatalLog, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { logging.printDepth(fatalLog, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). 
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { logging.printf(fatalLog, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. // It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32 // Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.print(fatalLog, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.printDepth(fatalLog, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.println(fatalLog, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.printf(fatalLog, format, args...) } ================================================ FILE: vendor/github.com/golang/glog/glog_file.go ================================================ // Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ // // Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // File I/O for logs. package glog import ( "errors" "flag" "fmt" "os" "os/user" "path/filepath" "strings" "sync" "time" ) // MaxSize is the maximum size of a log file in bytes. var MaxSize uint64 = 1024 * 1024 * 1800 // logDirs lists the candidate directories for new log files. var logDirs []string // If non-empty, overrides the choice of directory in which to write logs. // See createLogDirs for the full list of possible destinations. var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") func createLogDirs() { if *logDir != "" { logDirs = append(logDirs, *logDir) } logDirs = append(logDirs, os.TempDir()) } var ( pid = os.Getpid() program = filepath.Base(os.Args[0]) host = "unknownhost" userName = "unknownuser" ) func init() { h, err := os.Hostname() if err == nil { host = shortHostname(h) } current, err := user.Current() if err == nil { userName = current.Username } // Sanitize userName since it may contain filepath separators on Windows. 
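// For example (hypothetical account name): a Windows user such as `CORP\jdoe`
// becomes `CORP_jdoe`, so the log file names built by logName below contain no
// stray path separator.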
userName = strings.Replace(userName, `\`, "_", -1) } // shortHostname returns its argument, truncating at the first period. // For instance, given "www.google.com" it returns "www". func shortHostname(hostname string) string { if i := strings.Index(hostname, "."); i >= 0 { return hostname[:i] } return hostname } // logName returns a new log file name containing tag, with start time t, and // the name for the symlink for tag. func logName(tag string, t time.Time) (name, link string) { name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", program, host, userName, tag, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), pid) return name, program + "." + tag } var onceLogDirs sync.Once // create creates a new log file and returns the file and its filename, which // contains tag ("INFO", "FATAL", etc.) and t. If the file is created // successfully, create also attempts to update the symlink for that tag, ignoring // errors. func create(tag string, t time.Time) (f *os.File, filename string, err error) { onceLogDirs.Do(createLogDirs) if len(logDirs) == 0 { return nil, "", errors.New("log: no log dirs") } name, link := logName(tag, t) var lastErr error for _, dir := range logDirs { fname := filepath.Join(dir, name) f, err := os.Create(fname) if err == nil { symlink := filepath.Join(dir, link) os.Remove(symlink) // ignore err os.Symlink(name, symlink) // ignore err return f, fname, nil } lastErr = err } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } ================================================ FILE: vendor/github.com/golang/protobuf/LICENSE ================================================ Go support for Protocol Buffers - Google's data interchange format Copyright 2010 The Go Authors. All rights reserved. https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/golang/protobuf/proto/Makefile ================================================ # Go support for Protocol Buffers - Google's data interchange format # # Copyright 2010 The Go Authors. All rights reserved. 
# https://github.com/golang/protobuf # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. install: go install test: install generate-test-pbs go test generate-test-pbs: make install make -C testdata protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto make ================================================ FILE: vendor/github.com/golang/protobuf/proto/clone.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
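// Usage sketch for the exported helpers in this file (pb.Item is a hypothetical
// generated message type, shown only for illustration):
//
//	dst := proto.Clone(src).(*pb.Item) // deep copy
//	proto.Merge(dst, extra)            // set fields that are set in extra, append repeated fields
//
// Merge panics if dst is nil or if the two messages are not the same type.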
// Protocol buffer deep copy and merge. // TODO: MessageSet and RawMessage. package proto import ( "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. func Clone(pb Message) Message { in := reflect.ValueOf(pb) if in.IsNil() { return pb } out := reflect.New(in.Type().Elem()) // out is empty so a merge is a deep copy. mergeStruct(out.Elem(), in.Elem()) return out.Interface().(Message) } // Merge merges src into dst. // Required and optional fields that are set in src will be set to that value in dst. // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. func Merge(dst, src Message) { in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { // Explicit test prior to mergeStruct so that mistyped nils will fail panic("proto: type mismatch") } if in.IsNil() { // Merging nil into non-nil is a quiet no-op return } mergeStruct(out.Elem(), in.Elem()) } func mergeStruct(out, in reflect.Value) { for i := 0; i < in.NumField(); i++ { f := in.Type().Field(i) if strings.HasPrefix(f.Name, "XXX_") { continue } mergeAny(out.Field(i), in.Field(i)) } if emIn, ok := in.Addr().Interface().(extendableProto); ok { emOut := out.Addr().Interface().(extendableProto) mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) } uf := in.FieldByName("XXX_unrecognized") if !uf.IsValid() { return } uin := uf.Bytes() if len(uin) > 0 { out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) } } func mergeAny(out, in reflect.Value) { if in.Type() == protoMessageType { if !in.IsNil() { if out.IsNil() { out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) } else { Merge(out.Interface().(Message), in.Interface().(Message)) } } return } switch in.Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: out.Set(in) case reflect.Map: if in.Len() == 0 { return } if out.IsNil() { out.Set(reflect.MakeMap(in.Type())) } // For maps with value types of *T or []byte we need to deep copy each value. elemKind := in.Type().Elem().Kind() for _, key := range in.MapKeys() { var val reflect.Value switch elemKind { case reflect.Ptr: val = reflect.New(in.Type().Elem().Elem()) mergeAny(val, in.MapIndex(key)) case reflect.Slice: val = in.MapIndex(key) val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) default: val = in.MapIndex(key) } out.SetMapIndex(key, val) } case reflect.Ptr: if in.IsNil() { return } if out.IsNil() { out.Set(reflect.New(in.Elem().Type())) } mergeAny(out.Elem(), in.Elem()) case reflect.Slice: if in.IsNil() { return } if in.Type().Elem().Kind() == reflect.Uint8 { // []byte is a scalar bytes field, not a repeated field. // Make a deep copy. // Append to []byte{} instead of []byte(nil) so that we never end up // with a nil result. 
out.SetBytes(append([]byte{}, in.Bytes()...)) return } n := in.Len() if out.IsNil() { out.Set(reflect.MakeSlice(in.Type(), 0, n)) } switch in.Type().Elem().Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: out.Set(reflect.AppendSlice(out, in)) default: for i := 0; i < n; i++ { x := reflect.Indirect(reflect.New(in.Type().Elem())) mergeAny(x, in.Index(i)) out.Set(reflect.Append(out, x)) } } case reflect.Struct: mergeStruct(out, in) default: // unknown type, so not a protocol buffer log.Printf("proto: don't know how to copy %v", in) } } func mergeExtension(out, in map[int32]Extension) { for extNum, eIn := range in { eOut := Extension{desc: eIn.desc} if eIn.value != nil { v := reflect.New(reflect.TypeOf(eIn.value)).Elem() mergeAny(v, reflect.ValueOf(eIn.value)) eOut.value = v.Interface() } if eIn.enc != nil { eOut.enc = make([]byte, len(eIn.enc)) copy(eOut.enc, eIn.enc) } out[extNum] = eOut } } ================================================ FILE: vendor/github.com/golang/protobuf/proto/decode.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for decoding protocol buffer data to construct in-memory representations. */ import ( "errors" "fmt" "io" "os" "reflect" ) // errOverflow is returned when an integer is too large to be represented. var errOverflow = errors.New("proto: integer overflow") // The fundamental decoders that interpret bytes on the wire. // Those that take integer types all return uint64 and are // therefore of type valueDecoder. // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. 
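// For illustration (hypothetical bytes): the value 300 is encoded as 0xAC 0x02, so
//	x, n := proto.DecodeVarint([]byte{0xAC, 0x02}) // x == 300, n == 2
// while a truncated or over-long encoding returns n == 0.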
func DecodeVarint(buf []byte) (x uint64, n int) { // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 } b := uint64(buf[n]) n++ x |= (b & 0x7F) << shift if (b & 0x80) == 0 { return x, n } } // The number is too large to represent in a 64-bit value. return 0, 0 } // DecodeVarint reads a varint-encoded integer from the Buffer. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func (p *Buffer) DecodeVarint() (x uint64, err error) { // x, err already 0 i := p.index l := len(p.buf) for shift := uint(0); shift < 64; shift += 7 { if i >= l { err = io.ErrUnexpectedEOF return } b := p.buf[i] i++ x |= (uint64(b) & 0x7F) << shift if b < 0x80 { p.index = i return } } // The number is too large to represent in a 64-bit value. err = errOverflow return } // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. func (p *Buffer) DecodeFixed64() (x uint64, err error) { // x, err already 0 i := p.index + 8 if i < 0 || i > len(p.buf) { err = io.ErrUnexpectedEOF return } p.index = i x = uint64(p.buf[i-8]) x |= uint64(p.buf[i-7]) << 8 x |= uint64(p.buf[i-6]) << 16 x |= uint64(p.buf[i-5]) << 24 x |= uint64(p.buf[i-4]) << 32 x |= uint64(p.buf[i-3]) << 40 x |= uint64(p.buf[i-2]) << 48 x |= uint64(p.buf[i-1]) << 56 return } // DecodeFixed32 reads a 32-bit integer from the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. func (p *Buffer) DecodeFixed32() (x uint64, err error) { // x, err already 0 i := p.index + 4 if i < 0 || i > len(p.buf) { err = io.ErrUnexpectedEOF return } p.index = i x = uint64(p.buf[i-4]) x |= uint64(p.buf[i-3]) << 8 x |= uint64(p.buf[i-2]) << 16 x |= uint64(p.buf[i-1]) << 24 return } // DecodeZigzag64 reads a zigzag-encoded 64-bit integer // from the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) DecodeZigzag64() (x uint64, err error) { x, err = p.DecodeVarint() if err != nil { return } x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) return } // DecodeZigzag32 reads a zigzag-encoded 32-bit integer // from the Buffer. // This is the format used for the sint32 protocol buffer type. func (p *Buffer) DecodeZigzag32() (x uint64, err error) { x, err = p.DecodeVarint() if err != nil { return } x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) return } // These are not ValueDecoders: they produce an array of bytes or a string. // bytes, embedded messages // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { n, err := p.DecodeVarint() if err != nil { return nil, err } nb := int(n) if nb < 0 { return nil, fmt.Errorf("proto: bad byte length %d", nb) } end := p.index + nb if end < p.index || end > len(p.buf) { return nil, io.ErrUnexpectedEOF } if !alloc { // todo: check if can get more uses of alloc=false buf = p.buf[p.index:end] p.index += nb return } buf = make([]byte, nb) copy(buf, p.buf[p.index:]) p.index += nb return } // DecodeStringBytes reads an encoded string from the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) DecodeStringBytes() (s string, err error) { buf, err := p.DecodeRawBytes(false) if err != nil { return } return string(buf), nil } // Skip the next item in the buffer. 
Its wire type is decoded and presented as an argument. // If the protocol buffer has extensions, and the field matches, add it as an extension. // Otherwise, if the XXX_unrecognized field exists, append the skipped data there. func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { oi := o.index err := o.skip(t, tag, wire) if err != nil { return err } if !unrecField.IsValid() { return nil } ptr := structPointer_Bytes(base, unrecField) // Add the skipped field to struct field obuf := o.buf o.buf = *ptr o.EncodeVarint(uint64(tag<<3 | wire)) *ptr = append(o.buf, obuf[oi:o.index]...) o.buf = obuf return nil } // Skip the next item in the buffer. Its wire type is decoded and presented as an argument. func (o *Buffer) skip(t reflect.Type, tag, wire int) error { var u uint64 var err error switch wire { case WireVarint: _, err = o.DecodeVarint() case WireFixed64: _, err = o.DecodeFixed64() case WireBytes: _, err = o.DecodeRawBytes(false) case WireFixed32: _, err = o.DecodeFixed32() case WireStartGroup: for { u, err = o.DecodeVarint() if err != nil { break } fwire := int(u & 0x7) if fwire == WireEndGroup { break } ftag := int(u >> 3) err = o.skip(t, ftag, fwire) if err != nil { break } } default: err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) } return err } // Unmarshaler is the interface representing objects that can // unmarshal themselves. The method should reset the receiver before // decoding starts. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. type Unmarshaler interface { Unmarshal([]byte) error } // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. // // Unmarshal resets pb before starting to unmarshal, so any // existing data in pb is always removed. Use UnmarshalMerge // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() return UnmarshalMerge(buf, pb) } // UnmarshalMerge parses the protocol buffer representation in buf and // writes the decoded result to pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. // // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) } // Unmarshal parses the protocol buffer representation in the // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { err := u.Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } typ, base, err := getbase(pb) if err != nil { return err } err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) if collectStats { stats.Decode++ } return err } // unmarshalType does the work of unmarshaling a structure. 
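// A note on the required-field bookkeeping below (illustrative numbers): a
// required field with tag 3 sets bit 1<<2 in the reqFields mask, so a message
// that repeats the same required tag decrements `required` only once; tags
// above 64 fall back to a plain counter, which the inline comment concedes is
// imprecise for duplicated fields.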
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { var state errorState required, reqFields := prop.reqCount, uint64(0) var err error for err == nil && o.index < len(o.buf) { oi := o.index var u uint64 u, err = o.DecodeVarint() if err != nil { break } wire := int(u & 0x7) if wire == WireEndGroup { if is_group { return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) } tag := int(u >> 3) if tag <= 0 { return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) } fieldnum, ok := prop.decoderTags.get(tag) if !ok { // Maybe it's an extension? if prop.extendable { if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { if err = o.skip(st, tag, wire); err == nil { ext := e.ExtensionMap()[int32(tag)] // may be missing ext.enc = append(ext.enc, o.buf[oi:o.index]...) e.ExtensionMap()[int32(tag)] = ext } continue } } err = o.skipAndSave(st, tag, wire, base, prop.unrecField) continue } p := prop.Prop[fieldnum] if p.dec == nil { fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) continue } dec := p.dec if wire != WireStartGroup && wire != p.WireType { if wire == WireBytes && p.packedDec != nil { // a packable field dec = p.packedDec } else { err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) continue } } decErr := dec(o, p, base) if decErr != nil && !state.shouldContinue(decErr, p) { err = decErr } if err == nil && p.Required { // Successfully decoded a required field. if tag <= 64 { // use bitmap for fields 1-64 to catch field reuse. var mask uint64 = 1 << uint64(tag-1) if reqFields&mask == 0 { // new required field reqFields |= mask required-- } } else { // This is imprecise. It can be fooled by a required field // with a tag > 64 that is encoded twice; that's very rare. // A fully correct implementation would require allocating // a data structure, which we would like to avoid. required-- } } } if err == nil { if is_group { return io.ErrUnexpectedEOF } if state.err != nil { return state.err } if required > 0 { // Not enough information to determine the exact field. If we use extra // CPU, we could determine the field only if the missing required field // has a tag <= 64 and we check reqFields. return &RequiredNotSetError{"{Unknown}"} } } return err } // Individual type decoders // For each, // u is the decoded value, // v is a pointer to the field (pointer) in the struct // Sizes of the pools to allocate inside the Buffer. // The goal is modest amortization and allocation // on at least 16-byte boundaries. const ( boolPoolSize = 16 uint32PoolSize = 8 uint64PoolSize = 4 ) // Decode a bool. func (o *Buffer) dec_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } if len(o.bools) == 0 { o.bools = make([]bool, boolPoolSize) } o.bools[0] = u != 0 *structPointer_Bool(base, p.field) = &o.bools[0] o.bools = o.bools[1:] return nil } func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } *structPointer_BoolVal(base, p.field) = u != 0 return nil } // Decode an int32. 
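// For illustration (hypothetical value): a negative proto int32 such as -1
// arrives as the ten-byte varint for 0xFFFFFFFFFFFFFFFF; dec_int32 below keeps
// only uint32(u) == 0xFFFFFFFF, which reads back as -1 when interpreted as int32.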
func (o *Buffer) dec_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) return nil } func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) return nil } // Decode an int64. func (o *Buffer) dec_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word64_Set(structPointer_Word64(base, p.field), o, u) return nil } func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word64Val_Set(structPointer_Word64Val(base, p.field), o, u) return nil } // Decode a string. func (o *Buffer) dec_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } *structPointer_String(base, p.field) = &s return nil } func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } *structPointer_StringVal(base, p.field) = s return nil } // Decode a slice of bytes ([]byte). func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } *structPointer_Bytes(base, p.field) = b return nil } // Decode a slice of bools ([]bool). func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } v := structPointer_BoolSlice(base, p.field) *v = append(*v, u != 0) return nil } // Decode a slice of bools ([]bool) in packed format. func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { v := structPointer_BoolSlice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded bools y := *v for i := 0; i < nb; i++ { u, err := p.valDec(o) if err != nil { return err } y = append(y, u != 0) } *v = y return nil } // Decode a slice of int32s ([]int32). func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } structPointer_Word32Slice(base, p.field).Append(uint32(u)) return nil } // Decode a slice of int32s ([]int32) in packed format. func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { v := structPointer_Word32Slice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded int32s fin := o.index + nb if fin < o.index { return errOverflow } for o.index < fin { u, err := p.valDec(o) if err != nil { return err } v.Append(uint32(u)) } return nil } // Decode a slice of int64s ([]int64). func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } structPointer_Word64Slice(base, p.field).Append(u) return nil } // Decode a slice of int64s ([]int64) in packed format. func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { v := structPointer_Word64Slice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded int64s fin := o.index + nb if fin < o.index { return errOverflow } for o.index < fin { u, err := p.valDec(o) if err != nil { return err } v.Append(u) } return nil } // Decode a slice of strings ([]string). 
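// Worked example for the packed decoders above (hypothetical field): a packed
// repeated int32 field with tag 4 holding {3, 270} arrives as
//	0x22            // key: (4<<3)|WireBytes
//	0x03            // payload length in bytes
//	0x03 0x8E 0x02  // varints 3 and 270
// dec_slice_packed_int32 reads the length, then appends values until o.index
// reaches fin.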
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } v := structPointer_StringSlice(base, p.field) *v = append(*v, s) return nil } // Decode a slice of slice of bytes ([][]byte). func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } v := structPointer_BytesSlice(base, p.field) *v = append(*v, b) return nil } // Decode a map field. func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { raw, err := o.DecodeRawBytes(false) if err != nil { return err } oi := o.index // index at the end of this map entry o.index -= len(raw) // move buffer back to start of map entry mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V if mptr.Elem().IsNil() { mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) } v := mptr.Elem() // map[K]V // Prepare addressable doubly-indirect placeholders for the key and value types. // See enc_new_map for why. keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K keybase := toStructPointer(keyptr.Addr()) // **K var valbase structPointer var valptr reflect.Value switch p.mtype.Elem().Kind() { case reflect.Slice: // []byte var dummy []byte valptr = reflect.ValueOf(&dummy) // *[]byte valbase = toStructPointer(valptr) // *[]byte case reflect.Ptr: // message; valptr is **Msg; need to allocate the intermediate pointer valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V valptr.Set(reflect.New(valptr.Type().Elem())) valbase = toStructPointer(valptr) default: // everything else valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V valbase = toStructPointer(valptr.Addr()) // **V } // Decode. // This parses a restricted wire format, namely the encoding of a message // with two fields. See enc_new_map for the format. for o.index < oi { // tagcode for key and value properties are always a single byte // because they have tags 1 and 2. tagcode := o.buf[o.index] o.index++ switch tagcode { case p.mkeyprop.tagcode[0]: if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { return err } case p.mvalprop.tagcode[0]: if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { return err } default: // TODO: Should we silently skip this instead? return fmt.Errorf("proto: bad map data tag %d", raw[0]) } } keyelem, valelem := keyptr.Elem(), valptr.Elem() if !keyelem.IsValid() || !valelem.IsValid() { // We did not decode the key or the value in the map entry. // Either way, it's an invalid map entry. return fmt.Errorf("proto: bad map data: missing key/val") } v.SetMapIndex(keyelem, valelem) return nil } // Decode a group. func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { bas := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(bas) { // allocate new nested message bas = toStructPointer(reflect.New(p.stype)) structPointer_SetStructPointer(base, p.field, bas) } return o.unmarshalType(p.stype, p.sprop, true, bas) } // Decode an embedded message. func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { raw, e := o.DecodeRawBytes(false) if e != nil { return e } bas := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(bas) { // allocate new nested message bas = toStructPointer(reflect.New(p.stype)) structPointer_SetStructPointer(base, p.field, bas) } // If the object can unmarshal itself, let it. 
if p.isUnmarshaler { iv := structPointer_Interface(bas, p.stype) return iv.(Unmarshaler).Unmarshal(raw) } obuf := o.buf oi := o.index o.buf = raw o.index = 0 err = o.unmarshalType(p.stype, p.sprop, false, bas) o.buf = obuf o.index = oi return err } // Decode a slice of embedded messages. func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { return o.dec_slice_struct(p, false, base) } // Decode a slice of embedded groups. func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { return o.dec_slice_struct(p, true, base) } // Decode a slice of structs ([]*struct). func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { v := reflect.New(p.stype) bas := toStructPointer(v) structPointer_StructPointerSlice(base, p.field).Append(bas) if is_group { err := o.unmarshalType(p.stype, p.sprop, is_group, bas) return err } raw, err := o.DecodeRawBytes(false) if err != nil { return err } // If the object can unmarshal itself, let it. if p.isUnmarshaler { iv := v.Interface() return iv.(Unmarshaler).Unmarshal(raw) } obuf := o.buf oi := o.index o.buf = raw o.index = 0 err = o.unmarshalType(p.stype, p.sprop, is_group, bas) o.buf = obuf o.index = oi return err } ================================================ FILE: vendor/github.com/golang/protobuf/proto/encode.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for encoding data into the wire format for protocol buffers. */ import ( "errors" "fmt" "reflect" "sort" ) // RequiredNotSetError is the error returned if Marshal is called with // a protocol buffer struct whose required fields have not // all been initialized. It is also the error returned if Unmarshal is // called with an encoded protocol buffer that does not include all the // required fields. 
// // When printed, RequiredNotSetError reports the first unset required field in a // message. If the field cannot be precisely determined, it is reported as // "{Unknown}". type RequiredNotSetError struct { field string } func (e *RequiredNotSetError) Error() string { return fmt.Sprintf("proto: required field %q not set", e.field) } var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. errRepeatedHasNil = errors.New("proto: repeated field has nil element") // ErrNil is the error returned if Marshal is called with nil. ErrNil = errors.New("proto: Marshal called with nil") ) // The fundamental encoders that put bytes on the wire. // Those that take integer types all accept uint64 and are // therefore of type valueEncoder. const maxVarintBytes = 10 // maximum length of a varint // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. // Not used by the package itself, but helpful to clients // wishing to use the same encoding. func EncodeVarint(x uint64) []byte { var buf [maxVarintBytes]byte var n int for n = 0; x > 127; n++ { buf[n] = 0x80 | uint8(x&0x7F) x >>= 7 } buf[n] = uint8(x) n++ return buf[0:n] } // EncodeVarint writes a varint-encoded integer to the Buffer. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func (p *Buffer) EncodeVarint(x uint64) error { for x >= 1<<7 { p.buf = append(p.buf, uint8(x&0x7f|0x80)) x >>= 7 } p.buf = append(p.buf, uint8(x)) return nil } func sizeVarint(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } // EncodeFixed64 writes a 64-bit integer to the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. func (p *Buffer) EncodeFixed64(x uint64) error { p.buf = append(p.buf, uint8(x), uint8(x>>8), uint8(x>>16), uint8(x>>24), uint8(x>>32), uint8(x>>40), uint8(x>>48), uint8(x>>56)) return nil } func sizeFixed64(x uint64) int { return 8 } // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. func (p *Buffer) EncodeFixed32(x uint64) error { p.buf = append(p.buf, uint8(x), uint8(x>>8), uint8(x>>16), uint8(x>>24)) return nil } func sizeFixed32(x uint64) int { return 4 } // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func sizeZigzag64(x uint64) int { return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer // to the Buffer. // This is the format used for the sint32 protocol buffer type. func (p *Buffer) EncodeZigzag32(x uint64) error { // use signed number to get arithmetic right shift. return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } func sizeZigzag32(x uint64) int { return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. func (p *Buffer) EncodeRawBytes(b []byte) error { p.EncodeVarint(uint64(len(b))) p.buf = append(p.buf, b...) 
return nil } func sizeRawBytes(b []byte) int { return sizeVarint(uint64(len(b))) + len(b) } // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { p.EncodeVarint(uint64(len(s))) p.buf = append(p.buf, s...) return nil } func sizeStringBytes(s string) int { return sizeVarint(uint64(len(s))) + len(s) } // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } // Marshal takes the protocol buffer // and encodes it into the wire format, returning the data. func Marshal(pb Message) ([]byte, error) { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { return m.Marshal() } p := NewBuffer(nil) err := p.Marshal(pb) var state errorState if err != nil && !state.shouldContinue(err, nil) { return nil, err } if p.buf == nil && err == nil { // Return a non-nil slice on success. return []byte{}, nil } return p.buf, err } // Marshal takes the protocol buffer // and encodes it into the wire format, writing the result to the // Buffer. func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() if err != nil { return err } p.buf = append(p.buf, data...) return nil } t, base, err := getbase(pb) if structPointer_IsNil(base) { return ErrNil } if err == nil { err = p.enc_struct(GetProperties(t.Elem()), base) } if collectStats { stats.Encode++ } return err } // Size returns the encoded size of a protocol buffer. func Size(pb Message) (n int) { // Can the object marshal itself? If so, Size is slow. // TODO: add Size to Marshaler, or add a Sizer interface. if m, ok := pb.(Marshaler); ok { b, _ := m.Marshal() return len(b) } t, base, err := getbase(pb) if structPointer_IsNil(base) { return 0 } if err == nil { n = size_struct(GetProperties(t.Elem()), base) } if collectStats { stats.Size++ } return } // Individual type encoders. // Encode a bool. func (o *Buffer) enc_bool(p *Properties, base structPointer) error { v := *structPointer_Bool(base, p.field) if v == nil { return ErrNil } x := 0 if *v { x = 1 } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { v := *structPointer_BoolVal(base, p.field) if !v { return ErrNil } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, 1) return nil } func size_bool(p *Properties, base structPointer) int { v := *structPointer_Bool(base, p.field) if v == nil { return 0 } return len(p.tagcode) + 1 // each bool takes exactly one byte } func size_proto3_bool(p *Properties, base structPointer) int { v := *structPointer_BoolVal(base, p.field) if !v { return 0 } return len(p.tagcode) + 1 // each bool takes exactly one byte } // Encode an int32. func (o *Buffer) enc_int32(p *Properties, base structPointer) error { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return ErrNil } x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { v := structPointer_Word32Val(base, p.field) x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range if x == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) 
p.valEnc(o, uint64(x)) return nil } func size_int32(p *Properties, base structPointer) (n int) { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return 0 } x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range n += len(p.tagcode) n += p.valSize(uint64(x)) return } func size_proto3_int32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range if x == 0 { return 0 } n += len(p.tagcode) n += p.valSize(uint64(x)) return } // Encode a uint32. // Exactly the same as int32, except for no sign extension. func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return ErrNil } x := word32_Get(v) o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { v := structPointer_Word32Val(base, p.field) x := word32Val_Get(v) if x == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func size_uint32(p *Properties, base structPointer) (n int) { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return 0 } x := word32_Get(v) n += len(p.tagcode) n += p.valSize(uint64(x)) return } func size_proto3_uint32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := word32Val_Get(v) if x == 0 { return 0 } n += len(p.tagcode) n += p.valSize(uint64(x)) return } // Encode an int64. func (o *Buffer) enc_int64(p *Properties, base structPointer) error { v := structPointer_Word64(base, p.field) if word64_IsNil(v) { return ErrNil } x := word64_Get(v) o.buf = append(o.buf, p.tagcode...) p.valEnc(o, x) return nil } func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { v := structPointer_Word64Val(base, p.field) x := word64Val_Get(v) if x == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, x) return nil } func size_int64(p *Properties, base structPointer) (n int) { v := structPointer_Word64(base, p.field) if word64_IsNil(v) { return 0 } x := word64_Get(v) n += len(p.tagcode) n += p.valSize(x) return } func size_proto3_int64(p *Properties, base structPointer) (n int) { v := structPointer_Word64Val(base, p.field) x := word64Val_Get(v) if x == 0 { return 0 } n += len(p.tagcode) n += p.valSize(x) return } // Encode a string. func (o *Buffer) enc_string(p *Properties, base structPointer) error { v := *structPointer_String(base, p.field) if v == nil { return ErrNil } x := *v o.buf = append(o.buf, p.tagcode...) o.EncodeStringBytes(x) return nil } func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { v := *structPointer_StringVal(base, p.field) if v == "" { return ErrNil } o.buf = append(o.buf, p.tagcode...) o.EncodeStringBytes(v) return nil } func size_string(p *Properties, base structPointer) (n int) { v := *structPointer_String(base, p.field) if v == nil { return 0 } x := *v n += len(p.tagcode) n += sizeStringBytes(x) return } func size_proto3_string(p *Properties, base structPointer) (n int) { v := *structPointer_StringVal(base, p.field) if v == "" { return 0 } n += len(p.tagcode) n += sizeStringBytes(v) return } // All protocol buffer fields are nillable, but be careful. func isNil(v reflect.Value) bool { switch v.Kind() { case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: return v.IsNil() } return false } // Encode a message struct. 
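// For illustration (hypothetical field): an embedded message in field 5 whose
// body encodes to two bytes is written as
//	0x2A           // p.tagcode: (5<<3)|WireBytes
//	0x02 <2 bytes> // length-delimited body
// whether it comes from the Marshaler fast path (EncodeRawBytes) or from
// enc_len_struct below.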
func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { var state errorState structp := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(structp) { return ErrNil } // Can the object marshal itself? if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, err := m.Marshal() if err != nil && !state.shouldContinue(err, nil) { return err } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(data) return nil } o.buf = append(o.buf, p.tagcode...) return o.enc_len_struct(p.sprop, structp, &state) } func size_struct_message(p *Properties, base structPointer) int { structp := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(structp) { return 0 } // Can the object marshal itself? if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() n0 := len(p.tagcode) n1 := sizeRawBytes(data) return n0 + n1 } n0 := len(p.tagcode) n1 := size_struct(p.sprop, structp) n2 := sizeVarint(uint64(n1)) // size of encoded length return n0 + n1 + n2 } // Encode a group struct. func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { var state errorState b := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(b) { return ErrNil } o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) err := o.enc_struct(p.sprop, b) if err != nil && !state.shouldContinue(err, nil) { return err } o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) return state.err } func size_struct_group(p *Properties, base structPointer) (n int) { b := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(b) { return 0 } n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) n += size_struct(p.sprop, b) n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) return } // Encode a slice of bools ([]bool). func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return ErrNil } for _, x := range s { o.buf = append(o.buf, p.tagcode...) v := uint64(0) if x { v = 1 } p.valEnc(o, v) } return nil } func size_slice_bool(p *Properties, base structPointer) int { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return 0 } return l * (len(p.tagcode) + 1) // each bool takes exactly one byte } // Encode a slice of bools ([]bool) in packed format. func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) o.EncodeVarint(uint64(l)) // each bool takes exactly one byte for _, x := range s { v := uint64(0) if x { v = 1 } p.valEnc(o, v) } return nil } func size_slice_packed_bool(p *Properties, base structPointer) (n int) { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return 0 } n += len(p.tagcode) n += sizeVarint(uint64(l)) n += l // each bool takes exactly one byte return } // Encode a slice of bytes ([]byte). func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { s := *structPointer_Bytes(base, p.field) if s == nil { return ErrNil } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(s) return nil } func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { s := *structPointer_Bytes(base, p.field) if len(s) == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) 
o.EncodeRawBytes(s) return nil } func size_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) if s == nil { return 0 } n += len(p.tagcode) n += sizeRawBytes(s) return } func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) if len(s) == 0 { return 0 } n += len(p.tagcode) n += sizeRawBytes(s) return } // Encode a slice of int32s ([]int32). func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) x := int32(s.Index(i)) // permit sign extension to use full 64-bit range p.valEnc(o, uint64(x)) } return nil } func size_slice_int32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } for i := 0; i < l; i++ { n += len(p.tagcode) x := int32(s.Index(i)) // permit sign extension to use full 64-bit range n += p.valSize(uint64(x)) } return } // Encode a slice of int32s ([]int32) in packed format. func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } // TODO: Reuse a Buffer. buf := NewBuffer(nil) for i := 0; i < l; i++ { x := int32(s.Index(i)) // permit sign extension to use full 64-bit range p.valEnc(buf, uint64(x)) } o.buf = append(o.buf, p.tagcode...) o.EncodeVarint(uint64(len(buf.buf))) o.buf = append(o.buf, buf.buf...) return nil } func size_slice_packed_int32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } var bufSize int for i := 0; i < l; i++ { x := int32(s.Index(i)) // permit sign extension to use full 64-bit range bufSize += p.valSize(uint64(x)) } n += len(p.tagcode) n += sizeVarint(uint64(bufSize)) n += bufSize return } // Encode a slice of uint32s ([]uint32). // Exactly the same as int32, except for no sign extension. func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) x := s.Index(i) p.valEnc(o, uint64(x)) } return nil } func size_slice_uint32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } for i := 0; i < l; i++ { n += len(p.tagcode) x := s.Index(i) n += p.valSize(uint64(x)) } return } // Encode a slice of uint32s ([]uint32) in packed format. // Exactly the same as int32, except for no sign extension. func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } // TODO: Reuse a Buffer. buf := NewBuffer(nil) for i := 0; i < l; i++ { p.valEnc(buf, uint64(s.Index(i))) } o.buf = append(o.buf, p.tagcode...) o.EncodeVarint(uint64(len(buf.buf))) o.buf = append(o.buf, buf.buf...) return nil } func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } var bufSize int for i := 0; i < l; i++ { bufSize += p.valSize(uint64(s.Index(i))) } n += len(p.tagcode) n += sizeVarint(uint64(bufSize)) n += bufSize return } // Encode a slice of int64s ([]int64). 
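// For illustration (hypothetical field): a non-packed repeated int64 field with
// tag 6 holding {1, 2} is written as 0x30 0x01 0x30 0x02; the tag bytes are
// repeated for every element, which is why size_slice_int64 charges
// len(p.tagcode) once per entry.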
func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) p.valEnc(o, s.Index(i)) } return nil } func size_slice_int64(p *Properties, base structPointer) (n int) { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return 0 } for i := 0; i < l; i++ { n += len(p.tagcode) n += p.valSize(s.Index(i)) } return } // Encode a slice of int64s ([]int64) in packed format. func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } // TODO: Reuse a Buffer. buf := NewBuffer(nil) for i := 0; i < l; i++ { p.valEnc(buf, s.Index(i)) } o.buf = append(o.buf, p.tagcode...) o.EncodeVarint(uint64(len(buf.buf))) o.buf = append(o.buf, buf.buf...) return nil } func size_slice_packed_int64(p *Properties, base structPointer) (n int) { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return 0 } var bufSize int for i := 0; i < l; i++ { bufSize += p.valSize(s.Index(i)) } n += len(p.tagcode) n += sizeVarint(uint64(bufSize)) n += bufSize return } // Encode a slice of slice of bytes ([][]byte). func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { ss := *structPointer_BytesSlice(base, p.field) l := len(ss) if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(ss[i]) } return nil } func size_slice_slice_byte(p *Properties, base structPointer) (n int) { ss := *structPointer_BytesSlice(base, p.field) l := len(ss) if l == 0 { return 0 } n += l * len(p.tagcode) for i := 0; i < l; i++ { n += sizeRawBytes(ss[i]) } return } // Encode a slice of strings ([]string). func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { ss := *structPointer_StringSlice(base, p.field) l := len(ss) for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) o.EncodeStringBytes(ss[i]) } return nil } func size_slice_string(p *Properties, base structPointer) (n int) { ss := *structPointer_StringSlice(base, p.field) l := len(ss) n += l * len(p.tagcode) for i := 0; i < l; i++ { n += sizeStringBytes(ss[i]) } return } // Encode a slice of message structs ([]*struct). func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { var state errorState s := structPointer_StructPointerSlice(base, p.field) l := s.Len() for i := 0; i < l; i++ { structp := s.Index(i) if structPointer_IsNil(structp) { return errRepeatedHasNil } // Can the object marshal itself? if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, err := m.Marshal() if err != nil && !state.shouldContinue(err, nil) { return err } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(data) continue } o.buf = append(o.buf, p.tagcode...) err := o.enc_len_struct(p.sprop, structp, &state) if err != nil && !state.shouldContinue(err, nil) { if err == ErrNil { return errRepeatedHasNil } return err } } return state.err } func size_slice_struct_message(p *Properties, base structPointer) (n int) { s := structPointer_StructPointerSlice(base, p.field) l := s.Len() n += l * len(p.tagcode) for i := 0; i < l; i++ { structp := s.Index(i) if structPointer_IsNil(structp) { return // return the size up to this point } // Can the object marshal itself? 
if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() n += len(p.tagcode) n += sizeRawBytes(data) continue } n0 := size_struct(p.sprop, structp) n1 := sizeVarint(uint64(n0)) // size of encoded length n += n0 + n1 } return } // Encode a slice of group structs ([]*struct). func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { var state errorState s := structPointer_StructPointerSlice(base, p.field) l := s.Len() for i := 0; i < l; i++ { b := s.Index(i) if structPointer_IsNil(b) { return errRepeatedHasNil } o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) err := o.enc_struct(p.sprop, b) if err != nil && !state.shouldContinue(err, nil) { if err == ErrNil { return errRepeatedHasNil } return err } o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) } return state.err } func size_slice_struct_group(p *Properties, base structPointer) (n int) { s := structPointer_StructPointerSlice(base, p.field) l := s.Len() n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) for i := 0; i < l; i++ { b := s.Index(i) if structPointer_IsNil(b) { return // return size up to this point } n += size_struct(p.sprop, b) } return } // Encode an extension map. func (o *Buffer) enc_map(p *Properties, base structPointer) error { v := *structPointer_ExtMap(base, p.field) if err := encodeExtensionMap(v); err != nil { return err } // Fast-path for common cases: zero or one extensions. if len(v) <= 1 { for _, e := range v { o.buf = append(o.buf, e.enc...) } return nil } // Sort keys to provide a deterministic encoding. keys := make([]int, 0, len(v)) for k := range v { keys = append(keys, int(k)) } sort.Ints(keys) for _, k := range keys { o.buf = append(o.buf, v[int32(k)].enc...) } return nil } func size_map(p *Properties, base structPointer) int { v := *structPointer_ExtMap(base, p.field) return sizeExtensionMap(v) } // Encode a map field. func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { var state errorState // XXX: or do we need to plumb this through? /* A map defined as map map_field = N; is encoded in the same way as message MapFieldEntry { key_type key = 1; value_type value = 2; } repeated MapFieldEntry map_field = N; */ v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V if v.Len() == 0 { return nil } keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) enc := func() error { if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { return err } if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { return err } return nil } keys := v.MapKeys() sort.Sort(mapKeys(keys)) for _, key := range keys { val := v.MapIndex(key) // The only illegal map entry values are nil message pointers. if val.Kind() == reflect.Ptr && val.IsNil() { return errors.New("proto: map has nil element") } keycopy.Set(key) valcopy.Set(val) o.buf = append(o.buf, p.tagcode...) if err := o.enc_len_thing(enc, &state); err != nil { return err } } return nil } func size_new_map(p *Properties, base structPointer) int { v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) n := 0 for _, key := range v.MapKeys() { val := v.MapIndex(key) keycopy.Set(key) valcopy.Set(val) // Tag codes for key and val are the responsibility of the sub-sizer. 
keysize := p.mkeyprop.size(p.mkeyprop, keybase) valsize := p.mvalprop.size(p.mvalprop, valbase) entry := keysize + valsize // Add on tag code and length of map entry itself. n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry } return n } // mapEncodeScratch returns a new reflect.Value matching the map's value type, // and a structPointer suitable for passing to an encoder or sizer. func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { // Prepare addressable doubly-indirect placeholders for the key and value types. // This is needed because the element-type encoders expect **T, but the map iteration produces T. keycopy = reflect.New(mapType.Key()).Elem() // addressable K keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K keyptr.Set(keycopy.Addr()) // keybase = toStructPointer(keyptr.Addr()) // **K // Value types are more varied and require special handling. switch mapType.Elem().Kind() { case reflect.Slice: // []byte var dummy []byte valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte valbase = toStructPointer(valcopy.Addr()) case reflect.Ptr: // message; the generated field type is map[K]*Msg (so V is *Msg), // so we only need one level of indirection. valcopy = reflect.New(mapType.Elem()).Elem() // addressable V valbase = toStructPointer(valcopy.Addr()) default: // everything else valcopy = reflect.New(mapType.Elem()).Elem() // addressable V valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V valptr.Set(valcopy.Addr()) // valbase = toStructPointer(valptr.Addr()) // **V } return } // Encode a struct. func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { var state errorState // Encode fields in tag order so that decoders may use optimizations // that depend on the ordering. // https://developers.google.com/protocol-buffers/docs/encoding#order for _, i := range prop.order { p := prop.Prop[i] if p.enc != nil { err := p.enc(o, p, base) if err != nil { if err == ErrNil { if p.Required && state.err == nil { state.err = &RequiredNotSetError{p.Name} } } else if err == errRepeatedHasNil { // Give more context to nil values in repeated fields. return errors.New("repeated field " + p.OrigName + " has nil element") } else if !state.shouldContinue(err, p) { return err } } } } // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) if len(v) > 0 { o.buf = append(o.buf, v...) } } return state.err } func size_struct(prop *StructProperties, base structPointer) (n int) { for _, i := range prop.order { p := prop.Prop[i] if p.size != nil { n += p.size(p, base) } } // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) n += len(v) } return } var zeroes [20]byte // longer than any conceivable sizeVarint // Encode a struct, preceded by its encoded length (as a varint). func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) } // Encode something, preceded by its encoded length (as a varint). 
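//
// For orientation, the packed slice encoders above achieve the same framing by encoding
// into a scratch Buffer first and then emitting its length; a hedged equivalent using
// only identifiers defined in this package:
//
//	scratch := NewBuffer(nil)
//	// ... encode the body into scratch ...
//	o.EncodeVarint(uint64(len(scratch.Bytes())))
//	o.buf = append(o.buf, scratch.Bytes()...)
//
// enc_len_thing below avoids the extra buffer: it reserves four bytes for the length,
// encodes the body in place, and then shifts the body left or right so the real
// varint-encoded length fits exactly.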
func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { iLen := len(o.buf) o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length iMsg := len(o.buf) err := enc() if err != nil && !state.shouldContinue(err, nil) { return err } lMsg := len(o.buf) - iMsg lLen := sizeVarint(uint64(lMsg)) switch x := lLen - (iMsg - iLen); { case x > 0: // actual length is x bytes larger than the space we reserved // Move msg x bytes right. o.buf = append(o.buf, zeroes[:x]...) copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) case x < 0: // actual length is x bytes smaller than the space we reserved // Move msg x bytes left. copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) o.buf = o.buf[:len(o.buf)+x] // x is negative } // Encode the length in the reserved space. o.buf = o.buf[:iLen] o.EncodeVarint(uint64(lMsg)) o.buf = o.buf[:len(o.buf)+lMsg] return state.err } // errorState maintains the first error that occurs and updates that error // with additional context. type errorState struct { err error } // shouldContinue reports whether encoding should continue upon encountering the // given error. If the error is RequiredNotSetError, shouldContinue returns true // and, if this is the first appearance of that error, remembers it for future // reporting. // // If prop is not nil, it may update any error with additional context about the // field with the error. func (s *errorState) shouldContinue(err error, prop *Properties) bool { // Ignore unset required fields. reqNotSet, ok := err.(*RequiredNotSetError) if !ok { return false } if s.err == nil { if prop != nil { err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} } s.err = err } return true } ================================================ FILE: vendor/github.com/golang/protobuf/proto/equal.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer comparison. // TODO: MessageSet. 
package proto import ( "bytes" "log" "reflect" "strings" ) /* Equal returns true iff protocol buffers a and b are equal. The arguments must both be pointers to protocol buffer structs. Equality is defined in this way: - Two messages are equal iff they are the same type, corresponding fields are equal, unknown field sets are equal, and extensions sets are equal. - Two set scalar fields are equal iff their values are equal. If the fields are of a floating-point type, remember that NaN != x for all x, including NaN. - Two repeated fields are equal iff their lengths are the same, and their corresponding elements are equal (a "bytes" field, although represented by []byte, is not a repeated field) - Two unset fields are equal. - Two unknown field sets are equal if their current encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. - Every other combination of things are not equal. The return value is undefined if a and b are not protocol buffers. */ func Equal(a, b Message) bool { if a == nil || b == nil { return a == b } v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) if v1.Type() != v2.Type() { return false } if v1.Kind() == reflect.Ptr { if v1.IsNil() { return v2.IsNil() } if v2.IsNil() { return false } v1, v2 = v1.Elem(), v2.Elem() } if v1.Kind() != reflect.Struct { return false } return equalStruct(v1, v2) } // v1 and v2 are known to have the same type. func equalStruct(v1, v2 reflect.Value) bool { for i := 0; i < v1.NumField(); i++ { f := v1.Type().Field(i) if strings.HasPrefix(f.Name, "XXX_") { continue } f1, f2 := v1.Field(i), v2.Field(i) if f.Type.Kind() == reflect.Ptr { if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { // both unset continue } else if n1 != n2 { // set/unset mismatch return false } b1, ok := f1.Interface().(raw) if ok { b2 := f2.Interface().(raw) // RawMessage if !bytes.Equal(b1.Bytes(), b2.Bytes()) { return false } continue } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2) { return false } } if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { em2 := v2.FieldByName("XXX_extensions") if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { return false } } uf := v1.FieldByName("XXX_unrecognized") if !uf.IsValid() { return true } u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() if !bytes.Equal(u1, u2) { return false } return true } // v1 and v2 are known to have the same type. func equalAny(v1, v2 reflect.Value) bool { if v1.Type() == protoMessageType { m1, _ := v1.Interface().(Message) m2, _ := v2.Interface().(Message) return Equal(m1, m2) } switch v1.Kind() { case reflect.Bool: return v1.Bool() == v2.Bool() case reflect.Float32, reflect.Float64: return v1.Float() == v2.Float() case reflect.Int32, reflect.Int64: return v1.Int() == v2.Int() case reflect.Map: if v1.Len() != v2.Len() { return false } for _, key := range v1.MapKeys() { val2 := v2.MapIndex(key) if !val2.IsValid() { // This key was not found in the second map. 
return false } if !equalAny(v1.MapIndex(key), val2) { return false } } return true case reflect.Ptr: return equalAny(v1.Elem(), v2.Elem()) case reflect.Slice: if v1.Type().Elem().Kind() == reflect.Uint8 { // short circuit: []byte if v1.IsNil() != v2.IsNil() { return false } return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) } if v1.Len() != v2.Len() { return false } for i := 0; i < v1.Len(); i++ { if !equalAny(v1.Index(i), v2.Index(i)) { return false } } return true case reflect.String: return v1.Interface().(string) == v2.Interface().(string) case reflect.Struct: return equalStruct(v1, v2) case reflect.Uint32, reflect.Uint64: return v1.Uint() == v2.Uint() } // unknown type, so not a protocol buffer log.Printf("proto: don't know how to compare %v", v1) return false } // base is the struct type that the extensions are based on. // em1 and em2 are extension maps. func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { if len(em1) != len(em2) { return false } for extNum, e1 := range em1 { e2, ok := em2[extNum] if !ok { return false } m1, m2 := e1.value, e2.value if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { return false } continue } // At least one is encoded. To do a semantically correct comparison // we need to unmarshal them first. var desc *ExtensionDesc if m := extensionMaps[base]; m != nil { desc = m[extNum] } if desc == nil { log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) continue } var err error if m1 == nil { m1, err = decodeExtension(e1.enc, desc) } if m2 == nil && err == nil { m2, err = decodeExtension(e2.enc, desc) } if err != nil { // The encoded form is invalid. log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) return false } if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { return false } } return true } ================================================ FILE: vendor/github.com/golang/protobuf/proto/extensions.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Types and routines for supporting protocol buffer extensions. */ import ( "errors" "fmt" "reflect" "strconv" "sync" ) // ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. var ErrMissingExtension = errors.New("proto: missing extension") // ExtensionRange represents a range of message extensions for a protocol buffer. // Used in code generated by the protocol compiler. type ExtensionRange struct { Start, End int32 // both inclusive } // extendableProto is an interface implemented by any protocol buffer that may be extended. type extendableProto interface { Message ExtensionRangeArray() []ExtensionRange ExtensionMap() map[int32]Extension } var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { ExtendedType Message // nil pointer to the type that is being extended ExtensionType interface{} // nil pointer to the extension type Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style } func (ed *ExtensionDesc) repeated() bool { t := reflect.TypeOf(ed.ExtensionType) return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 } // Extension represents an extension in a message. type Extension struct { // When an extension is stored in a message using SetExtension // only desc and value are set. When the message is marshaled // enc will be set to the encoded form of the message. // // When a message is unmarshaled and contains extensions, each // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. desc *ExtensionDesc value interface{} enc []byte } // SetRawExtension is for testing only. func SetRawExtension(base extendableProto, id int32, b []byte) { base.ExtensionMap()[id] = Extension{enc: b} } // isExtensionField returns true iff the given field number is in an extension range. func isExtensionField(pb extendableProto, field int32) bool { for _, er := range pb.ExtensionRangeArray() { if er.Start <= field && field <= er.End { return true } } return false } // checkExtensionTypes checks that the given extension is valid for pb. func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { // Check the extended type. if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) } // Check the range. if !isExtensionField(pb, extension.Field) { return errors.New("proto: bad extension number; not in declared ranges") } return nil } // extPropKey is sufficient to uniquely identify an extension. 
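// It pairs the extended message's Go type with the extension field number;
// extensionProperties below uses it as the cache key for derived *Properties,
// guarded by extProp's RWMutex.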
type extPropKey struct { base reflect.Type field int32 } var extProp = struct { sync.RWMutex m map[extPropKey]*Properties }{ m: make(map[extPropKey]*Properties), } func extensionProperties(ed *ExtensionDesc) *Properties { key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} extProp.RLock() if prop, ok := extProp.m[key]; ok { extProp.RUnlock() return prop } extProp.RUnlock() extProp.Lock() defer extProp.Unlock() // Check again. if prop, ok := extProp.m[key]; ok { return prop } prop := new(Properties) prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) extProp.m[key] = prop return prop } // encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. func encodeExtensionMap(m map[int32]Extension) error { for k, e := range m { if e.value == nil || e.desc == nil { // Extension is only in its encoded form. continue } // We don't skip extensions that have an encoded form set, // because the extension value may have been mutated after // the last time this function was called. et := reflect.TypeOf(e.desc.ExtensionType) props := extensionProperties(e.desc) p := NewBuffer(nil) // If e.value has type T, the encoder expects a *struct{ X T }. // Pass a *T with a zero field and hope it all works out. x := reflect.New(et) x.Elem().Set(reflect.ValueOf(e.value)) if err := props.enc(p, props, toStructPointer(x)); err != nil { return err } e.enc = p.buf m[k] = e } return nil } func sizeExtensionMap(m map[int32]Extension) (n int) { for _, e := range m { if e.value == nil || e.desc == nil { // Extension is only in its encoded form. n += len(e.enc) continue } // We don't skip extensions that have an encoded form set, // because the extension value may have been mutated after // the last time this function was called. et := reflect.TypeOf(e.desc.ExtensionType) props := extensionProperties(e.desc) // If e.value has type T, the encoder expects a *struct{ X T }. // Pass a *T with a zero field and hope it all works out. x := reflect.New(et) x.Elem().Set(reflect.ValueOf(e.value)) n += props.size(props, toStructPointer(x)) } return } // HasExtension returns whether the given extension is present in pb. func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? _, ok := pb.ExtensionMap()[extension.Field] return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb extendableProto, extension *ExtensionDesc) { // TODO: Check types, field numbers, etc.? delete(pb.ExtensionMap(), extension.Field) } // GetExtension parses and returns the given extension of pb. // If the extension is not present and has no default value it returns ErrMissingExtension. func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { if err := checkExtensionTypes(pb, extension); err != nil { return nil, err } emap := pb.ExtensionMap() e, ok := emap[extension.Field] if !ok { // defaultExtensionValue returns the default value or // ErrMissingExtension if there is no default. return defaultExtensionValue(extension) } if e.value != nil { // Already decoded. Check the descriptor, though. if e.desc != extension { // This shouldn't happen. If it does, it means that // GetExtension was called twice with two different // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } return e.value, nil } v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err } // Remember the decoded version and drop the encoded version. 
// That way it is safe to mutate what we return. e.value = v e.desc = extension e.enc = nil emap[extension.Field] = e return e.value, nil } // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) sf, _, err := fieldDefault(t, props) if err != nil { return nil, err } if sf == nil || sf.value == nil { // There is no default value. return nil, ErrMissingExtension } if t.Kind() != reflect.Ptr { // We do not need to return a Ptr, we can directly return sf.value. return sf.value, nil } // We need to return an interface{} that is a pointer to sf.value. value := reflect.New(t).Elem() value.Set(reflect.New(value.Type().Elem())) if sf.kind == reflect.Int32 { // We may have an int32 or an enum, but the underlying data is int32. // Since we can't set an int32 into a non int32 reflect.value directly // set it as a int32. value.Elem().SetInt(int64(sf.value.(int32))) } else { value.Elem().Set(reflect.ValueOf(sf.value)) } return value.Interface(), nil } // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { o := NewBuffer(b) t := reflect.TypeOf(extension.ExtensionType) rep := extension.repeated() props := extensionProperties(extension) // t is a pointer to a struct, pointer to basic type or a slice. // Allocate a "field" to store the pointer/slice itself; the // pointer/slice will be stored here. We pass // the address of this field to props.dec. // This passes a zero field and a *t and lets props.dec // interpret it as a *struct{ x t }. value := reflect.New(t).Elem() for { // Discard wire type and field number varint. It isn't needed. if _, err := o.DecodeVarint(); err != nil { return nil, err } if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { return nil, err } if !rep || o.index >= len(o.buf) { break } } return value.Interface(), nil } // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { epb, ok := pb.(extendableProto) if !ok { err = errors.New("proto: not an extendable proto") return } extensions = make([]interface{}, len(es)) for i, e := range es { extensions[i], err = GetExtension(epb, e) if err == ErrMissingExtension { err = nil } if err != nil { return } } return } // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { if err := checkExtensionTypes(pb, extension); err != nil { return err } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { return errors.New("proto: bad extension value type") } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension // from an ErrNil due to a missing field. Extensions are // always optional, so the encoder would just swallow the error // and drop all the extensions from the encoded message. 
if reflect.ValueOf(value).IsNil() { return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) } pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} return nil } // A global registry of extensions. // The generated code will register the generated descriptors by calling RegisterExtension. var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) // RegisterExtension is called from the generated code. func RegisterExtension(desc *ExtensionDesc) { st := reflect.TypeOf(desc.ExtendedType).Elem() m := extensionMaps[st] if m == nil { m = make(map[int32]*ExtensionDesc) extensionMaps[st] = m } if _, ok := m[desc.Field]; ok { panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) } m[desc.Field] = desc } // RegisteredExtensions returns a map of the registered extensions of a // protocol buffer struct, indexed by the extension number. // The argument pb should be a nil pointer to the struct type. func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } ================================================ FILE: vendor/github.com/golang/protobuf/proto/lib.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* Package proto converts data structures to and from the wire format of protocol buffers. It works in concert with the Go source code generated for .proto files by the protocol compiler. A summary of the properties of the protocol buffer interface for a protocol buffer variable v: - Names are turned from camel_case to CamelCase for export. - There are no methods on v to set fields; just treat them as structure fields. - There are getters that return a field's value if set, and return the field's default value if unset. The getters work even if the receiver is a nil message. 
- The zero value for a struct is its correct initialization state. All desired fields must be set before marshaling. - A Reset() method will restore a protobuf struct to its zero state. - Non-repeated fields are pointers to the values; nil means unset. That is, optional or required field int32 f becomes F *int32. - Repeated fields are slices. - Helper functions are available to aid the setting of fields. msg.Foo = proto.String("hello") // set field - Constants are defined to hold the default values of all fields that have them. They have the form Default_StructName_FieldName. Because the getter methods handle defaulted values, direct use of these constants should be rare. - Enums are given type names and maps from names to values. Enum values are prefixed by the enclosing message's name, or by the enum's type name if it is a top-level enum. Enum types have a String method, and a Enum method to assist in message construction. - Nested messages, groups and enums have type names prefixed with the name of the surrounding message type. - Extensions are given descriptor names that start with E_, followed by an underscore-delimited list of the nested messages that contain it (if any) followed by the CamelCased name of the extension field itself. HasExtension, ClearExtension, GetExtension and SetExtension are functions for manipulating extensions. - Marshal and Unmarshal are functions to encode and decode the wire format. The simplest way to describe this is to see an example. Given file test.proto, containing package example; enum FOO { X = 17; } message Test { required string label = 1; optional int32 type = 2 [default=77]; repeated int64 reps = 3; optional group OptionalGroup = 4 { required string RequiredField = 5; } } The resulting file, test.pb.go, is: package example import proto "github.com/golang/protobuf/proto" import math "math" type FOO int32 const ( FOO_X FOO = 17 ) var FOO_name = map[int32]string{ 17: "X", } var FOO_value = map[string]int32{ "X": 17, } func (x FOO) Enum() *FOO { p := new(FOO) *p = x return p } func (x FOO) String() string { return proto.EnumName(FOO_name, int32(x)) } func (x *FOO) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FOO_value, data) if err != nil { return err } *x = FOO(value) return nil } type Test struct { Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Test) Reset() { *m = Test{} } func (m *Test) String() string { return proto.CompactTextString(m) } func (*Test) ProtoMessage() {} const Default_Test_Type int32 = 77 func (m *Test) GetLabel() string { if m != nil && m.Label != nil { return *m.Label } return "" } func (m *Test) GetType() int32 { if m != nil && m.Type != nil { return *m.Type } return Default_Test_Type } func (m *Test) GetOptionalgroup() *Test_OptionalGroup { if m != nil { return m.Optionalgroup } return nil } type Test_OptionalGroup struct { RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` } func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } func (m *Test_OptionalGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { return *m.RequiredField } return 
"" } func init() { proto.RegisterEnum("example.FOO", FOO_name, FOO_value) } To create and play with a Test object: package main import ( "log" "github.com/golang/protobuf/proto" pb "./example.pb" ) func main() { test := &pb.Test{ Label: proto.String("hello"), Type: proto.Int32(17), Optionalgroup: &pb.Test_OptionalGroup{ RequiredField: proto.String("good bye"), }, } data, err := proto.Marshal(test) if err != nil { log.Fatal("marshaling error: ", err) } newTest := &pb.Test{} err = proto.Unmarshal(data, newTest) if err != nil { log.Fatal("unmarshaling error: ", err) } // Now test and newTest contain the same data. if test.GetLabel() != newTest.GetLabel() { log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) } // etc. } */ package proto import ( "encoding/json" "fmt" "log" "reflect" "strconv" "sync" ) // Message is implemented by generated protocol buffer messages. type Message interface { Reset() String() string ProtoMessage() } // Stats records allocation details about the protocol buffer encoders // and decoders. Useful for tuning the library itself. type Stats struct { Emalloc uint64 // mallocs in encode Dmalloc uint64 // mallocs in decode Encode uint64 // number of encodes Decode uint64 // number of decodes Chit uint64 // number of cache hits Cmiss uint64 // number of cache misses Size uint64 // number of sizes } // Set to true to enable stats collection. const collectStats = false var stats Stats // GetStats returns a copy of the global Stats structure. func GetStats() Stats { return stats } // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; // the global functions Marshal and Unmarshal create a // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream index int // write point // pools of basic types to amortize allocation. bools []bool uint32s []uint32 uint64s []uint64 // extra pools, only used with pointer_reflect.go int32s []int32 int64s []int64 float32s []float32 float64s []float64 } // NewBuffer allocates a new Buffer and initializes its internal data to // the contents of the argument slice. func NewBuffer(e []byte) *Buffer { return &Buffer{buf: e} } // Reset resets the Buffer, ready for marshaling a new protocol buffer. func (p *Buffer) Reset() { p.buf = p.buf[0:0] // for reading/writing p.index = 0 // for reading } // SetBuf replaces the internal buffer with the slice, // ready for unmarshaling the contents of the slice. func (p *Buffer) SetBuf(s []byte) { p.buf = s p.index = 0 } // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } /* * Helper routines for simplifying the creation of optional fields of basic type. */ // Bool is a helper routine that allocates a new bool value // to store v and returns a pointer to it. func Bool(v bool) *bool { return &v } // Int32 is a helper routine that allocates a new int32 value // to store v and returns a pointer to it. func Int32(v int32) *int32 { return &v } // Int is a helper routine that allocates a new int32 value // to store v and returns a pointer to it, but unlike Int32 // its argument value is an int. func Int(v int) *int32 { p := new(int32) *p = int32(v) return p } // Int64 is a helper routine that allocates a new int64 value // to store v and returns a pointer to it. 
func Int64(v int64) *int64 { return &v } // Float32 is a helper routine that allocates a new float32 value // to store v and returns a pointer to it. func Float32(v float32) *float32 { return &v } // Float64 is a helper routine that allocates a new float64 value // to store v and returns a pointer to it. func Float64(v float64) *float64 { return &v } // Uint32 is a helper routine that allocates a new uint32 value // to store v and returns a pointer to it. func Uint32(v uint32) *uint32 { return &v } // Uint64 is a helper routine that allocates a new uint64 value // to store v and returns a pointer to it. func Uint64(v uint64) *uint64 { return &v } // String is a helper routine that allocates a new string value // to store v and returns a pointer to it. func String(v string) *string { return &v } // EnumName is a helper function to simplify printing protocol buffer enums // by name. Given an enum map and a value, it returns a useful string. func EnumName(m map[int32]string, v int32) string { s, ok := m[v] if ok { return s } return strconv.Itoa(int(v)) } // UnmarshalJSONEnum is a helper function to simplify recovering enum int values // from their JSON-encoded representation. Given a map from the enum's symbolic // names to its int values, and a byte buffer containing the JSON-encoded // value, it returns an int32 that can be cast to the enum type by the caller. // // The function can deal with both JSON representations, numeric and symbolic. func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { if data[0] == '"' { // New style: enums are strings. var repr string if err := json.Unmarshal(data, &repr); err != nil { return -1, err } val, ok := m[repr] if !ok { return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) } return val, nil } // Old style: enums are ints. var val int32 if err := json.Unmarshal(data, &val); err != nil { return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) } return val, nil } // DebugPrint dumps the encoded data in b in a debugging format with a header // including the string s. Used in testing but made available for general debugging. 
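//
// A hedged usage sketch (msg stands for any generated message value; the receiver's own
// buf and index are saved and restored around the dump):
//
//	var b Buffer
//	data, _ := Marshal(msg)
//	b.DebugPrint("after Marshal", data)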
func (p *Buffer) DebugPrint(s string, b []byte) { var u uint64 obuf := p.buf index := p.index p.buf = b p.index = 0 depth := 0 fmt.Printf("\n--- %s ---\n", s) out: for { for i := 0; i < depth; i++ { fmt.Print(" ") } index := p.index if index == len(p.buf) { break } op, err := p.DecodeVarint() if err != nil { fmt.Printf("%3d: fetching op err %v\n", index, err) break out } tag := op >> 3 wire := op & 7 switch wire { default: fmt.Printf("%3d: t=%3d unknown wire=%d\n", index, tag, wire) break out case WireBytes: var r []byte r, err = p.DecodeRawBytes(false) if err != nil { break out } fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) if len(r) <= 6 { for i := 0; i < len(r); i++ { fmt.Printf(" %.2x", r[i]) } } else { for i := 0; i < 3; i++ { fmt.Printf(" %.2x", r[i]) } fmt.Printf(" ..") for i := len(r) - 3; i < len(r); i++ { fmt.Printf(" %.2x", r[i]) } } fmt.Printf("\n") case WireFixed32: u, err = p.DecodeFixed32() if err != nil { fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) case WireFixed64: u, err = p.DecodeFixed64() if err != nil { fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) break case WireVarint: u, err = p.DecodeVarint() if err != nil { fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) case WireStartGroup: if err != nil { fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d start\n", index, tag) depth++ case WireEndGroup: depth-- if err != nil { fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d end\n", index, tag) } } if depth != 0 { fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) } fmt.Printf("\n") p.buf = obuf p.index = index } // SetDefaults sets unset protocol buffer fields to their default values. // It only modifies fields that are both unset and have defined defaults. // It recursively sets default values in any non-nil sub-messages. func SetDefaults(pb Message) { setDefaults(reflect.ValueOf(pb), true, false) } // v is a pointer to a struct. func setDefaults(v reflect.Value, recur, zeros bool) { v = v.Elem() defaultMu.RLock() dm, ok := defaults[v.Type()] defaultMu.RUnlock() if !ok { dm = buildDefaultMessage(v.Type()) defaultMu.Lock() defaults[v.Type()] = dm defaultMu.Unlock() } for _, sf := range dm.scalars { f := v.Field(sf.index) if !f.IsNil() { // field already set continue } dv := sf.value if dv == nil && !zeros { // no explicit default, and don't want to set zeros continue } fptr := f.Addr().Interface() // **T // TODO: Consider batching the allocations we do here. 
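// Each case below allocates a fresh value of the field's scalar kind, copies in the
// declared default when one exists (otherwise the zero value is kept, which only happens
// when zeros is true), and stores the pointer through fptr, the **T address of the field.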
switch sf.kind { case reflect.Bool: b := new(bool) if dv != nil { *b = dv.(bool) } *(fptr.(**bool)) = b case reflect.Float32: f := new(float32) if dv != nil { *f = dv.(float32) } *(fptr.(**float32)) = f case reflect.Float64: f := new(float64) if dv != nil { *f = dv.(float64) } *(fptr.(**float64)) = f case reflect.Int32: // might be an enum if ft := f.Type(); ft != int32PtrType { // enum f.Set(reflect.New(ft.Elem())) if dv != nil { f.Elem().SetInt(int64(dv.(int32))) } } else { // int32 field i := new(int32) if dv != nil { *i = dv.(int32) } *(fptr.(**int32)) = i } case reflect.Int64: i := new(int64) if dv != nil { *i = dv.(int64) } *(fptr.(**int64)) = i case reflect.String: s := new(string) if dv != nil { *s = dv.(string) } *(fptr.(**string)) = s case reflect.Uint8: // exceptional case: []byte var b []byte if dv != nil { db := dv.([]byte) b = make([]byte, len(db)) copy(b, db) } else { b = []byte{} } *(fptr.(*[]byte)) = b case reflect.Uint32: u := new(uint32) if dv != nil { *u = dv.(uint32) } *(fptr.(**uint32)) = u case reflect.Uint64: u := new(uint64) if dv != nil { *u = dv.(uint64) } *(fptr.(**uint64)) = u default: log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) } } for _, ni := range dm.nested { f := v.Field(ni) // f is *T or []*T or map[T]*T switch f.Kind() { case reflect.Ptr: if f.IsNil() { continue } setDefaults(f, recur, zeros) case reflect.Slice: for i := 0; i < f.Len(); i++ { e := f.Index(i) if e.IsNil() { continue } setDefaults(e, recur, zeros) } case reflect.Map: for _, k := range f.MapKeys() { e := f.MapIndex(k) if e.IsNil() { continue } setDefaults(e, recur, zeros) } } } } var ( // defaults maps a protocol buffer struct type to a slice of the fields, // with its scalar fields set to their proto-declared non-zero default values. defaultMu sync.RWMutex defaults = make(map[reflect.Type]defaultMessage) int32PtrType = reflect.TypeOf((*int32)(nil)) ) // defaultMessage represents information about the default values of a message. type defaultMessage struct { scalars []scalarField nested []int // struct field index of nested messages } type scalarField struct { index int // struct field index kind reflect.Kind // element type (the T in *T or []T) value interface{} // the proto-declared default value, or nil } // t is a struct type. func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { sprop := GetProperties(t) for _, prop := range sprop.Prop { fi, ok := sprop.decoderTags.get(prop.Tag) if !ok { // XXX_unrecognized continue } ft := t.Field(fi).Type sf, nested, err := fieldDefault(ft, prop) switch { case err != nil: log.Print(err) case nested: dm.nested = append(dm.nested, fi) case sf != nil: sf.index = fi dm.scalars = append(dm.scalars, *sf) } } return dm } // fieldDefault returns the scalarField for field type ft. // sf will be nil if the field can not have a default. // nestedMessage will be true if this is a nested message. // Note that sf.index is not set on return. 
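//
// For example (hedged, reusing the generated Test message from the package comment in
// lib.go), a field declared as
//
//	Type *int32 `protobuf:"varint,2,opt,name=type,def=77"`
//
// yields sf = &scalarField{kind: reflect.Int32, value: int32(77)} and
// nestedMessage == false.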
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { var canHaveDefault bool switch ft.Kind() { case reflect.Ptr: if ft.Elem().Kind() == reflect.Struct { nestedMessage = true } else { canHaveDefault = true // proto2 scalar field } case reflect.Slice: switch ft.Elem().Kind() { case reflect.Ptr: nestedMessage = true // repeated message case reflect.Uint8: canHaveDefault = true // bytes field } case reflect.Map: if ft.Elem().Kind() == reflect.Ptr { nestedMessage = true // map with message values } } if !canHaveDefault { if nestedMessage { return nil, true, nil } return nil, false, nil } // We now know that ft is a pointer or slice. sf = &scalarField{kind: ft.Elem().Kind()} // scalar fields without defaults if !prop.HasDefault { return sf, false, nil } // a scalar field: either *T or []byte switch ft.Elem().Kind() { case reflect.Bool: x, err := strconv.ParseBool(prop.Default) if err != nil { return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) } sf.value = x case reflect.Float32: x, err := strconv.ParseFloat(prop.Default, 32) if err != nil { return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) } sf.value = float32(x) case reflect.Float64: x, err := strconv.ParseFloat(prop.Default, 64) if err != nil { return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) } sf.value = x case reflect.Int32: x, err := strconv.ParseInt(prop.Default, 10, 32) if err != nil { return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) } sf.value = int32(x) case reflect.Int64: x, err := strconv.ParseInt(prop.Default, 10, 64) if err != nil { return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) } sf.value = x case reflect.String: sf.value = prop.Default case reflect.Uint8: // []byte (not *uint8) sf.value = []byte(prop.Default) case reflect.Uint32: x, err := strconv.ParseUint(prop.Default, 10, 32) if err != nil { return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) } sf.value = uint32(x) case reflect.Uint64: x, err := strconv.ParseUint(prop.Default, 10, 64) if err != nil { return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) } sf.value = x default: return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) } return sf, false, nil } // Map fields may have key types of non-float scalars, strings and enums. // The easiest way to sort them in some deterministic order is to use fmt. // If this turns out to be inefficient we can always consider other options, // such as doing a Schwartzian transform. type mapKeys []reflect.Value func (s mapKeys) Len() int { return len(s) } func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s mapKeys) Less(i, j int) bool { return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) } ================================================ FILE: vendor/github.com/golang/protobuf/proto/message_set.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Support for message sets. */ import ( "bytes" "encoding/json" "errors" "fmt" "reflect" "sort" ) // ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. // A message type ID is required for storing a protocol buffer in a message set. var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") // The first two types (_MessageSet_Item and MessageSet) // model what the protocol compiler produces for the following protocol message: // message MessageSet { // repeated group Item = 1 { // required int32 type_id = 2; // required string message = 3; // }; // } // That is the MessageSet wire format. We can't use a proto to generate these // because that would introduce a circular dependency between it and this package. // // When a proto1 proto has a field that looks like: // optional message info = 3; // the protocol compiler produces a field in the generated struct that looks like: // Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` // The package is automatically inserted so there is no need for that proto file to // import this package. type _MessageSet_Item struct { TypeId *int32 `protobuf:"varint,2,req,name=type_id"` Message []byte `protobuf:"bytes,3,req,name=message"` } type MessageSet struct { Item []*_MessageSet_Item `protobuf:"group,1,rep"` XXX_unrecognized []byte // TODO: caching? } // Make sure MessageSet is a Message. var _ Message = (*MessageSet)(nil) // messageTypeIder is an interface satisfied by a protocol buffer type // that may be stored in a MessageSet. type messageTypeIder interface { MessageTypeId() int32 } func (ms *MessageSet) find(pb Message) *_MessageSet_Item { mti, ok := pb.(messageTypeIder) if !ok { return nil } id := mti.MessageTypeId() for _, item := range ms.Item { if *item.TypeId == id { return item } } return nil } func (ms *MessageSet) Has(pb Message) bool { if ms.find(pb) != nil { return true } return false } func (ms *MessageSet) Unmarshal(pb Message) error { if item := ms.find(pb); item != nil { return Unmarshal(item.Message, pb) } if _, ok := pb.(messageTypeIder); !ok { return ErrNoMessageTypeId } return nil // TODO: return error instead? 
} func (ms *MessageSet) Marshal(pb Message) error { msg, err := Marshal(pb) if err != nil { return err } if item := ms.find(pb); item != nil { // reuse existing item item.Message = msg return nil } mti, ok := pb.(messageTypeIder) if !ok { return ErrNoMessageTypeId } mtid := mti.MessageTypeId() ms.Item = append(ms.Item, &_MessageSet_Item{ TypeId: &mtid, Message: msg, }) return nil } func (ms *MessageSet) Reset() { *ms = MessageSet{} } func (ms *MessageSet) String() string { return CompactTextString(ms) } func (*MessageSet) ProtoMessage() {} // Support for the message_set_wire_format message option. func skipVarint(buf []byte) []byte { i := 0 for ; buf[i]&0x80 != 0; i++ { } return buf[i+1:] } // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { if err := encodeExtensionMap(m); err != nil { return nil, err } // Sort extension IDs to provide a deterministic encoding. // See also enc_map in encode.go. ids := make([]int, 0, len(m)) for id := range m { ids = append(ids, int(id)) } sort.Ints(ids) ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} for _, id := range ids { e := m[int32(id)] // Remove the wire type and field number varint, as well as the length varint. msg := skipVarint(skipVarint(e.enc)) ms.Item = append(ms.Item, &_MessageSet_Item{ TypeId: Int32(int32(id)), Message: msg, }) } return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { ms := new(MessageSet) if err := Unmarshal(buf, ms); err != nil { return err } for _, item := range ms.Item { id := *item.TypeId msg := item.Message // Restore wire type and field number varint, plus length varint. // Be careful to preserve duplicate items. b := EncodeVarint(uint64(id)<<3 | WireBytes) if ext, ok := m[id]; ok { // Existing data; rip off the tag and length varint // so we join the new data correctly. // We can assume that ext.enc is set because we are unmarshaling. o := ext.enc[len(b):] // skip wire type and field number _, n := DecodeVarint(o) // calculate length of length varint o = o[n:] // skip length varint msg = append(o, msg...) // join old data and new data } b = append(b, EncodeVarint(uint64(len(msg)))...) b = append(b, msg...) m[id] = Extension{enc: b} } return nil } // MarshalMessageSetJSON encodes the extension map represented by m in JSON format. // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { var b bytes.Buffer b.WriteByte('{') // Process the map in key order for deterministic output. ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) // int32Slice defined in text.go for i, id := range ids { ext := m[id] if i > 0 { b.WriteByte(',') } msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. 
continue } fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value if x == nil { x = reflect.New(msd.t.Elem()).Interface() if err := Unmarshal(ext.enc, x.(Message)); err != nil { return nil, err } } d, err := json.Marshal(x) if err != nil { return nil, err } b.Write(d) } b.WriteByte('}') return b.Bytes(), nil } // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { // Common-case fast path. if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { return nil } // This is fairly tricky, and it's not clear that it is needed. return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") } // A global registry of types that can be used in a MessageSet. var messageSetMap = make(map[int32]messageSetDesc) type messageSetDesc struct { t reflect.Type // pointer to struct name string } // RegisterMessageSetType is called from the generated code. func RegisterMessageSetType(m Message, fieldNum int32, name string) { messageSetMap[fieldNum] = messageSetDesc{ t: reflect.TypeOf(m), name: name, } } ================================================ FILE: vendor/github.com/golang/protobuf/proto/pointer_reflect.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // +build appengine // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can // be used on App Engine. package proto import ( "math" "reflect" ) // A structPointer is a pointer to a struct. type structPointer struct { v reflect.Value } // toStructPointer returns a structPointer equivalent to the given reflect value. // The reflect value must itself be a pointer to a struct. 
func toStructPointer(v reflect.Value) structPointer { return structPointer{v} } // IsNil reports whether p is nil. func structPointer_IsNil(p structPointer) bool { return p.v.IsNil() } // Interface returns the struct pointer as an interface value. func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { return p.v.Interface() } // A field identifies a field in a struct, accessible from a structPointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int // toField returns a field equivalent to the given reflect field. func toField(f *reflect.StructField) field { return f.Index } // invalidField is an invalid field identifier. var invalidField = field(nil) // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } // field returns the given field in the struct as a reflect value. func structPointer_field(p structPointer, f field) reflect.Value { // Special case: an extension map entry with a value of type T // passes a *T to the struct-handling code with a zero field, // expecting that it will be treated as equivalent to *struct{ X T }, // which has the same memory layout. We have to handle that case // specially, because reflect will panic if we call FieldByIndex on a // non-struct. if f == nil { return p.v.Elem() } return p.v.Elem().FieldByIndex(f) } // ifield returns the given field in the struct as an interface value. func structPointer_ifield(p structPointer, f field) interface{} { return structPointer_field(p, f).Addr().Interface() } // Bytes returns the address of a []byte field in the struct. func structPointer_Bytes(p structPointer, f field) *[]byte { return structPointer_ifield(p, f).(*[]byte) } // BytesSlice returns the address of a [][]byte field in the struct. func structPointer_BytesSlice(p structPointer, f field) *[][]byte { return structPointer_ifield(p, f).(*[][]byte) } // Bool returns the address of a *bool field in the struct. func structPointer_Bool(p structPointer, f field) **bool { return structPointer_ifield(p, f).(**bool) } // BoolVal returns the address of a bool field in the struct. func structPointer_BoolVal(p structPointer, f field) *bool { return structPointer_ifield(p, f).(*bool) } // BoolSlice returns the address of a []bool field in the struct. func structPointer_BoolSlice(p structPointer, f field) *[]bool { return structPointer_ifield(p, f).(*[]bool) } // String returns the address of a *string field in the struct. func structPointer_String(p structPointer, f field) **string { return structPointer_ifield(p, f).(**string) } // StringVal returns the address of a string field in the struct. func structPointer_StringVal(p structPointer, f field) *string { return structPointer_ifield(p, f).(*string) } // StringSlice returns the address of a []string field in the struct. func structPointer_StringSlice(p structPointer, f field) *[]string { return structPointer_ifield(p, f).(*[]string) } // ExtMap returns the address of an extension map field in the struct. func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) } // Map returns the reflect.Value for the address of a map field in the struct. func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { return structPointer_field(p, f).Addr() } // SetStructPointer writes a *struct field in the struct. 
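// Illustrative sketch (Example is a hypothetical generated struct, not part of
// this file): how the reflect-based accessors above resolve a field.
//
//	type Example struct{ Name *string }
//	sf, _ := reflect.TypeOf(Example{}).FieldByName("Name")
//	f := toField(&sf)                          // index path, e.g. []int{0}
//	p := toStructPointer(reflect.ValueOf(&Example{}))
//	pp := structPointer_String(p, f)           // **string aliasing Example.Name
//	_ = pp
//
// Every accessor funnels through structPointer_field/ifield, so each access
// costs a FieldByIndex walk plus an interface type assertion.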
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { structPointer_field(p, f).Set(q.v) } // GetStructPointer reads a *struct field in the struct. func structPointer_GetStructPointer(p structPointer, f field) structPointer { return structPointer{structPointer_field(p, f)} } // StructPointerSlice the address of a []*struct field in the struct. func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { return structPointerSlice{structPointer_field(p, f)} } // A structPointerSlice represents the address of a slice of pointers to structs // (themselves messages or groups). That is, v.Type() is *[]*struct{...}. type structPointerSlice struct { v reflect.Value } func (p structPointerSlice) Len() int { return p.v.Len() } func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } func (p structPointerSlice) Append(q structPointer) { p.v.Set(reflect.Append(p.v, q.v)) } var ( int32Type = reflect.TypeOf(int32(0)) uint32Type = reflect.TypeOf(uint32(0)) float32Type = reflect.TypeOf(float32(0)) int64Type = reflect.TypeOf(int64(0)) uint64Type = reflect.TypeOf(uint64(0)) float64Type = reflect.TypeOf(float64(0)) ) // A word32 represents a field of type *int32, *uint32, *float32, or *enum. // That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. type word32 struct { v reflect.Value } // IsNil reports whether p is nil. func word32_IsNil(p word32) bool { return p.v.IsNil() } // Set sets p to point at a newly allocated word with bits set to x. func word32_Set(p word32, o *Buffer, x uint32) { t := p.v.Type().Elem() switch t { case int32Type: if len(o.int32s) == 0 { o.int32s = make([]int32, uint32PoolSize) } o.int32s[0] = int32(x) p.v.Set(reflect.ValueOf(&o.int32s[0])) o.int32s = o.int32s[1:] return case uint32Type: if len(o.uint32s) == 0 { o.uint32s = make([]uint32, uint32PoolSize) } o.uint32s[0] = x p.v.Set(reflect.ValueOf(&o.uint32s[0])) o.uint32s = o.uint32s[1:] return case float32Type: if len(o.float32s) == 0 { o.float32s = make([]float32, uint32PoolSize) } o.float32s[0] = math.Float32frombits(x) p.v.Set(reflect.ValueOf(&o.float32s[0])) o.float32s = o.float32s[1:] return } // must be enum p.v.Set(reflect.New(t)) p.v.Elem().SetInt(int64(int32(x))) } // Get gets the bits pointed at by p, as a uint32. func word32_Get(p word32) uint32 { elem := p.v.Elem() switch elem.Kind() { case reflect.Int32: return uint32(elem.Int()) case reflect.Uint32: return uint32(elem.Uint()) case reflect.Float32: return math.Float32bits(float32(elem.Float())) } panic("unreachable") } // Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. func structPointer_Word32(p structPointer, f field) word32 { return word32{structPointer_field(p, f)} } // A word32Val represents a field of type int32, uint32, float32, or enum. // That is, v.Type() is int32, uint32, float32, or enum and v is assignable. type word32Val struct { v reflect.Value } // Set sets *p to x. func word32Val_Set(p word32Val, x uint32) { switch p.v.Type() { case int32Type: p.v.SetInt(int64(x)) return case uint32Type: p.v.SetUint(uint64(x)) return case float32Type: p.v.SetFloat(float64(math.Float32frombits(x))) return } // must be enum p.v.SetInt(int64(int32(x))) } // Get gets the bits pointed at by p, as a uint32. 
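// Worked example (illustrative): word32 treats every 32-bit field as raw bits,
// so a float32 travels through math.Float32bits/Float32frombits unchanged.
//
//	bits := math.Float32bits(1.5) // 0x3fc00000
//	// word32_Set stores 1.5 into a *float32 field from those bits, and
//	// word32_Get on the same field returns 0x3fc00000 again.
//
// Enums fall through the switch in word32_Set and are allocated with
// reflect.New because their concrete named type is only known at runtime.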
func word32Val_Get(p word32Val) uint32 { elem := p.v switch elem.Kind() { case reflect.Int32: return uint32(elem.Int()) case reflect.Uint32: return uint32(elem.Uint()) case reflect.Float32: return math.Float32bits(float32(elem.Float())) } panic("unreachable") } // Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. func structPointer_Word32Val(p structPointer, f field) word32Val { return word32Val{structPointer_field(p, f)} } // A word32Slice is a slice of 32-bit values. // That is, v.Type() is []int32, []uint32, []float32, or []enum. type word32Slice struct { v reflect.Value } func (p word32Slice) Append(x uint32) { n, m := p.v.Len(), p.v.Cap() if n < m { p.v.SetLen(n + 1) } else { t := p.v.Type().Elem() p.v.Set(reflect.Append(p.v, reflect.Zero(t))) } elem := p.v.Index(n) switch elem.Kind() { case reflect.Int32: elem.SetInt(int64(int32(x))) case reflect.Uint32: elem.SetUint(uint64(x)) case reflect.Float32: elem.SetFloat(float64(math.Float32frombits(x))) } } func (p word32Slice) Len() int { return p.v.Len() } func (p word32Slice) Index(i int) uint32 { elem := p.v.Index(i) switch elem.Kind() { case reflect.Int32: return uint32(elem.Int()) case reflect.Uint32: return uint32(elem.Uint()) case reflect.Float32: return math.Float32bits(float32(elem.Float())) } panic("unreachable") } // Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. func structPointer_Word32Slice(p structPointer, f field) word32Slice { return word32Slice{structPointer_field(p, f)} } // word64 is like word32 but for 64-bit values. type word64 struct { v reflect.Value } func word64_Set(p word64, o *Buffer, x uint64) { t := p.v.Type().Elem() switch t { case int64Type: if len(o.int64s) == 0 { o.int64s = make([]int64, uint64PoolSize) } o.int64s[0] = int64(x) p.v.Set(reflect.ValueOf(&o.int64s[0])) o.int64s = o.int64s[1:] return case uint64Type: if len(o.uint64s) == 0 { o.uint64s = make([]uint64, uint64PoolSize) } o.uint64s[0] = x p.v.Set(reflect.ValueOf(&o.uint64s[0])) o.uint64s = o.uint64s[1:] return case float64Type: if len(o.float64s) == 0 { o.float64s = make([]float64, uint64PoolSize) } o.float64s[0] = math.Float64frombits(x) p.v.Set(reflect.ValueOf(&o.float64s[0])) o.float64s = o.float64s[1:] return } panic("unreachable") } func word64_IsNil(p word64) bool { return p.v.IsNil() } func word64_Get(p word64) uint64 { elem := p.v.Elem() switch elem.Kind() { case reflect.Int64: return uint64(elem.Int()) case reflect.Uint64: return elem.Uint() case reflect.Float64: return math.Float64bits(elem.Float()) } panic("unreachable") } func structPointer_Word64(p structPointer, f field) word64 { return word64{structPointer_field(p, f)} } // word64Val is like word32Val but for 64-bit values. 
type word64Val struct { v reflect.Value } func word64Val_Set(p word64Val, o *Buffer, x uint64) { switch p.v.Type() { case int64Type: p.v.SetInt(int64(x)) return case uint64Type: p.v.SetUint(x) return case float64Type: p.v.SetFloat(math.Float64frombits(x)) return } panic("unreachable") } func word64Val_Get(p word64Val) uint64 { elem := p.v switch elem.Kind() { case reflect.Int64: return uint64(elem.Int()) case reflect.Uint64: return elem.Uint() case reflect.Float64: return math.Float64bits(elem.Float()) } panic("unreachable") } func structPointer_Word64Val(p structPointer, f field) word64Val { return word64Val{structPointer_field(p, f)} } type word64Slice struct { v reflect.Value } func (p word64Slice) Append(x uint64) { n, m := p.v.Len(), p.v.Cap() if n < m { p.v.SetLen(n + 1) } else { t := p.v.Type().Elem() p.v.Set(reflect.Append(p.v, reflect.Zero(t))) } elem := p.v.Index(n) switch elem.Kind() { case reflect.Int64: elem.SetInt(int64(int64(x))) case reflect.Uint64: elem.SetUint(uint64(x)) case reflect.Float64: elem.SetFloat(float64(math.Float64frombits(x))) } } func (p word64Slice) Len() int { return p.v.Len() } func (p word64Slice) Index(i int) uint64 { elem := p.v.Index(i) switch elem.Kind() { case reflect.Int64: return uint64(elem.Int()) case reflect.Uint64: return uint64(elem.Uint()) case reflect.Float64: return math.Float64bits(float64(elem.Float())) } panic("unreachable") } func structPointer_Word64Slice(p structPointer, f field) word64Slice { return word64Slice{structPointer_field(p, f)} } ================================================ FILE: vendor/github.com/golang/protobuf/proto/pointer_unsafe.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // +build !appengine // This file contains the implementation of the proto field accesses using package unsafe. 
package proto import ( "reflect" "unsafe" ) // NOTE: These type_Foo functions would more idiomatically be methods, // but Go does not allow methods on pointer types, and we must preserve // some pointer type for the garbage collector. We use these // funcs with clunky names as our poor approximation to methods. // // An alternative would be // type structPointer struct { p unsafe.Pointer } // but that does not registerize as well. // A structPointer is a pointer to a struct. type structPointer unsafe.Pointer // toStructPointer returns a structPointer equivalent to the given reflect value. func toStructPointer(v reflect.Value) structPointer { return structPointer(unsafe.Pointer(v.Pointer())) } // IsNil reports whether p is nil. func structPointer_IsNil(p structPointer) bool { return p == nil } // Interface returns the struct pointer, assumed to have element type t, // as an interface value. func structPointer_Interface(p structPointer, t reflect.Type) interface{} { return reflect.NewAt(t, unsafe.Pointer(p)).Interface() } // A field identifies a field in a struct, accessible from a structPointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr // toField returns a field equivalent to the given reflect field. func toField(f *reflect.StructField) field { return field(f.Offset) } // invalidField is an invalid field identifier. const invalidField = ^field(0) // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != ^field(0) } // Bytes returns the address of a []byte field in the struct. func structPointer_Bytes(p structPointer, f field) *[]byte { return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // BytesSlice returns the address of a [][]byte field in the struct. func structPointer_BytesSlice(p structPointer, f field) *[][]byte { return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // Bool returns the address of a *bool field in the struct. func structPointer_Bool(p structPointer, f field) **bool { return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // BoolVal returns the address of a bool field in the struct. func structPointer_BoolVal(p structPointer, f field) *bool { return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // BoolSlice returns the address of a []bool field in the struct. func structPointer_BoolSlice(p structPointer, f field) *[]bool { return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // String returns the address of a *string field in the struct. func structPointer_String(p structPointer, f field) **string { return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // StringVal returns the address of a string field in the struct. func structPointer_StringVal(p structPointer, f field) *string { return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // StringSlice returns the address of a []string field in the struct. func structPointer_StringSlice(p structPointer, f field) *[]string { return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // ExtMap returns the address of an extension map field in the struct. func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // Map returns the reflect.Value for the address of a map field in the struct. 
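// Illustrative sketch (Example is a hypothetical struct, not part of this
// file): with the unsafe implementation a field is just a byte offset, so the
// accessors above are pointer arithmetic rather than a reflect.FieldByIndex walk.
//
//	type Example struct{ Name *string }
//	sf, _ := reflect.TypeOf(Example{}).FieldByName("Name")
//	f := toField(&sf)                    // field(sf.Offset), here 0
//	e := &Example{}
//	p := toStructPointer(reflect.ValueOf(e))
//	pp := structPointer_String(p, f)     // the same pointer as &e.Name
//	_ = pp
//
// This is also why the file is guarded by +build !appengine: the classic App
// Engine runtime disallowed package unsafe, so the reflect version is used there.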
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) } // SetStructPointer writes a *struct field in the struct. func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q } // GetStructPointer reads a *struct field in the struct. func structPointer_GetStructPointer(p structPointer, f field) structPointer { return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // StructPointerSlice the address of a []*struct field in the struct. func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). type structPointerSlice []structPointer func (v *structPointerSlice) Len() int { return len(*v) } func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } // A word32 is the address of a "pointer to 32-bit value" field. type word32 **uint32 // IsNil reports whether *v is nil. func word32_IsNil(p word32) bool { return *p == nil } // Set sets *v to point at a newly allocated word set to x. func word32_Set(p word32, o *Buffer, x uint32) { if len(o.uint32s) == 0 { o.uint32s = make([]uint32, uint32PoolSize) } o.uint32s[0] = x *p = &o.uint32s[0] o.uint32s = o.uint32s[1:] } // Get gets the value pointed at by *v. func word32_Get(p word32) uint32 { return **p } // Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. func structPointer_Word32(p structPointer, f field) word32 { return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // A word32Val is the address of a 32-bit value field. type word32Val *uint32 // Set sets *p to x. func word32Val_Set(p word32Val, x uint32) { *p = x } // Get gets the value pointed at by p. func word32Val_Get(p word32Val) uint32 { return *p } // Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. func structPointer_Word32Val(p structPointer, f field) word32Val { return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // A word32Slice is a slice of 32-bit values. type word32Slice []uint32 func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } func (v *word32Slice) Len() int { return len(*v) } func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } // Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. func structPointer_Word32Slice(p structPointer, f field) *word32Slice { return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // word64 is like word32 but for 64-bit values. type word64 **uint64 func word64_Set(p word64, o *Buffer, x uint64) { if len(o.uint64s) == 0 { o.uint64s = make([]uint64, uint64PoolSize) } o.uint64s[0] = x *p = &o.uint64s[0] o.uint64s = o.uint64s[1:] } func word64_IsNil(p word64) bool { return *p == nil } func word64_Get(p word64) uint64 { return **p } func structPointer_Word64(p structPointer, f field) word64 { return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // word64Val is like word32Val but for 64-bit values. 
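// Note (illustrative): word32_Set and word64_Set above do not allocate one
// word per field. They carve pointers out of pooled slices on the Buffer
// (o.uint32s and o.uint64s, each uint32PoolSize/uint64PoolSize long):
//
//	o.uint32s[0] = x
//	*p = &o.uint32s[0]
//	o.uint32s = o.uint32s[1:] // the next Set hands out the following element
//
// so decoding many optional scalar fields costs one slice allocation per pool
// refill instead of one heap allocation per field.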
type word64Val *uint64 func word64Val_Set(p word64Val, o *Buffer, x uint64) { *p = x } func word64Val_Get(p word64Val) uint64 { return *p } func structPointer_Word64Val(p structPointer, f field) word64Val { return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // word64Slice is like word32Slice but for 64-bit values. type word64Slice []uint64 func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } func (v *word64Slice) Len() int { return len(*v) } func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } func structPointer_Word64Slice(p structPointer, f field) *word64Slice { return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) } ================================================ FILE: vendor/github.com/golang/protobuf/proto/properties.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for encoding data into the wire format for protocol buffers. */ import ( "fmt" "os" "reflect" "sort" "strconv" "strings" "sync" ) const debug bool = false // Constants that identify the encoding of a value on the wire. const ( WireVarint = 0 WireFixed64 = 1 WireBytes = 2 WireStartGroup = 3 WireEndGroup = 4 WireFixed32 = 5 ) const startSize = 10 // initial slice/string sizes // Encoders are defined in encode.go // An encoder outputs the full representation of a field, including its // tag and encoder type. type encoder func(p *Buffer, prop *Properties, base structPointer) error // A valueEncoder encodes a single integer in a particular encoding. type valueEncoder func(o *Buffer, x uint64) error // Sizers are defined in encode.go // A sizer returns the encoded size of a field, including its tag and encoder // type. type sizer func(prop *Properties, base structPointer) int // A valueSizer returns the encoded size of a single integer in a particular // encoding. 
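// Worked example (illustrative): every encoded field starts with a varint key
// of the form tag<<3 | wire type, built from the Wire* constants above. For
// field number 4 encoded as length-delimited bytes (WireBytes == 2):
//
//	key := uint64(4)<<3 | WireBytes // 0x22
//
// so a bytes field with tag 4 always begins with the byte 0x22 on the wire.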
type valueSizer func(x uint64) int // Decoders are defined in decode.go // A decoder creates a value from its wire representation. // Unrecognized subelements are saved in unrec. type decoder func(p *Buffer, prop *Properties, base structPointer) error // A valueDecoder decodes a single integer in a particular encoding. type valueDecoder func(o *Buffer) (x uint64, err error) // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. type tagMap struct { fastTags []int slowTags map[int]int } // tagMapFastLimit is the upper bound on the tag number that will be stored in // the tagMap slice rather than its map. const tagMapFastLimit = 1024 func (p *tagMap) get(t int) (int, bool) { if t > 0 && t < tagMapFastLimit { if t >= len(p.fastTags) { return 0, false } fi := p.fastTags[t] return fi, fi >= 0 } fi, ok := p.slowTags[t] return fi, ok } func (p *tagMap) put(t int, fi int) { if t > 0 && t < tagMapFastLimit { for len(p.fastTags) < t+1 { p.fastTags = append(p.fastTags, -1) } p.fastTags[t] = fi return } if p.slowTags == nil { p.slowTags = make(map[int]int) } p.slowTags[t] = fi } // StructProperties represents properties for all the fields of a struct. // decoderTags and decoderOrigNames should only be used by the decoder. type StructProperties struct { Prop []*Properties // properties for each field reqCount int // required count decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order unrecField field // field id of the XXX_unrecognized []byte field extendable bool // is this an extendable proto } // Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. // See encode.go, (*Buffer).enc_struct. func (sp *StructProperties) Len() int { return len(sp.order) } func (sp *StructProperties) Less(i, j int) bool { return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag } func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } // Properties represents the protocol-specific behavior of a single struct field. type Properties struct { Name string // name of the field, for error messages OrigName string // original name before protocol compiler (always set) Wire string WireType int Tag int Required bool Optional bool Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only proto3 bool // whether this is known to be a proto3 field; set for []byte only Default string // default value HasDefault bool // whether an explicit default was provided def_uint64 uint64 enc encoder valEnc valueEncoder // set for bool and numeric types only field field tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) tagbuf [8]byte stype reflect.Type // set for struct types only sprop *StructProperties // set for struct types only isMarshaler bool isUnmarshaler bool mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only size sizer valSize valueSizer // set for bool and numeric types only dec decoder valDec valueDecoder // set for bool and numeric types only // If this is a packable field, this will be the decoder for the packed version of the field. packedDec decoder } // String formats the properties in the protobuf struct field tag style. 
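// Illustrative sketch: tagMap keeps lookups for common, small tag numbers on a
// slice and spills rare large tags to a map.
//
//	var tm tagMap
//	tm.put(3, 0)           // stored in fastTags[3] (3 < tagMapFastLimit)
//	tm.put(5000, 1)        // stored in slowTags[5000]
//	fi, ok := tm.get(3)    // 0, true: a bounds check and a slice index
//	fi, ok = tm.get(2)     // -1, false: fastTags[2] holds the -1 "unset" sentinel
//
// This matters because decoding calls get once per field occurrence.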
func (p *Properties) String() string { s := p.Wire s = "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" } if p.Optional { s += ",opt" } if p.Repeated { s += ",rep" } if p.Packed { s += ",packed" } if p.OrigName != p.Name { s += ",name=" + p.OrigName } if p.proto3 { s += ",proto3" } if len(p.Enum) > 0 { s += ",enum=" + p.Enum } if p.HasDefault { s += ",def=" + p.Default } return s } // Parse populates p by parsing a string in the protobuf struct field tag style. func (p *Properties) Parse(s string) { // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. if len(fields) < 2 { fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) return } p.Wire = fields[0] switch p.Wire { case "varint": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeVarint p.valDec = (*Buffer).DecodeVarint p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 p.valEnc = (*Buffer).EncodeFixed32 p.valDec = (*Buffer).DecodeFixed32 p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 p.valEnc = (*Buffer).EncodeFixed64 p.valDec = (*Buffer).DecodeFixed64 p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeZigzag32 p.valDec = (*Buffer).DecodeZigzag32 p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeZigzag64 p.valDec = (*Buffer).DecodeZigzag64 p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types default: fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) return } var err error p.Tag, err = strconv.Atoi(fields[1]) if err != nil { return } for i := 2; i < len(fields); i++ { f := fields[i] switch { case f == "req": p.Required = true case f == "opt": p.Optional = true case f == "rep": p.Repeated = true case f == "packed": p.Packed = true case strings.HasPrefix(f, "name="): p.OrigName = f[5:] case strings.HasPrefix(f, "enum="): p.Enum = f[5:] case f == "proto3": p.proto3 = true case strings.HasPrefix(f, "def="): p.HasDefault = true p.Default = f[4:] // rest of string if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") break } } } } func logNoSliceEnc(t1, t2 reflect.Type) { fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) } var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() // Initialize the fields for encoding and decoding. 
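// Worked example (illustrative): Parse on the tag of a generated optional
// bytes field, e.g. `protobuf:"bytes,49,opt,name=foo,def=hello!"`, leaves the
// Properties as
//
//	p.Wire == "bytes"       p.WireType == WireBytes
//	p.Tag == 49             p.Optional == true
//	p.OrigName == "foo"     p.HasDefault == true, p.Default == "hello!"
//
// A def= value containing commas is re-joined from the remaining fields, since
// def is documented above as always being the last option in the tag.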
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { p.enc = nil p.dec = nil p.size = nil switch t1 := typ; t1.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) // proto3 scalar types case reflect.Bool: p.enc = (*Buffer).enc_proto3_bool p.dec = (*Buffer).dec_proto3_bool p.size = size_proto3_bool case reflect.Int32: p.enc = (*Buffer).enc_proto3_int32 p.dec = (*Buffer).dec_proto3_int32 p.size = size_proto3_int32 case reflect.Uint32: p.enc = (*Buffer).enc_proto3_uint32 p.dec = (*Buffer).dec_proto3_int32 // can reuse p.size = size_proto3_uint32 case reflect.Int64, reflect.Uint64: p.enc = (*Buffer).enc_proto3_int64 p.dec = (*Buffer).dec_proto3_int64 p.size = size_proto3_int64 case reflect.Float32: p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits p.dec = (*Buffer).dec_proto3_int32 p.size = size_proto3_uint32 case reflect.Float64: p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits p.dec = (*Buffer).dec_proto3_int64 p.size = size_proto3_int64 case reflect.String: p.enc = (*Buffer).enc_proto3_string p.dec = (*Buffer).dec_proto3_string p.size = size_proto3_string case reflect.Ptr: switch t2 := t1.Elem(); t2.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) break case reflect.Bool: p.enc = (*Buffer).enc_bool p.dec = (*Buffer).dec_bool p.size = size_bool case reflect.Int32: p.enc = (*Buffer).enc_int32 p.dec = (*Buffer).dec_int32 p.size = size_int32 case reflect.Uint32: p.enc = (*Buffer).enc_uint32 p.dec = (*Buffer).dec_int32 // can reuse p.size = size_uint32 case reflect.Int64, reflect.Uint64: p.enc = (*Buffer).enc_int64 p.dec = (*Buffer).dec_int64 p.size = size_int64 case reflect.Float32: p.enc = (*Buffer).enc_uint32 // can just treat them as bits p.dec = (*Buffer).dec_int32 p.size = size_uint32 case reflect.Float64: p.enc = (*Buffer).enc_int64 // can just treat them as bits p.dec = (*Buffer).dec_int64 p.size = size_int64 case reflect.String: p.enc = (*Buffer).enc_string p.dec = (*Buffer).dec_string p.size = size_string case reflect.Struct: p.stype = t1.Elem() p.isMarshaler = isMarshaler(t1) p.isUnmarshaler = isUnmarshaler(t1) if p.Wire == "bytes" { p.enc = (*Buffer).enc_struct_message p.dec = (*Buffer).dec_struct_message p.size = size_struct_message } else { p.enc = (*Buffer).enc_struct_group p.dec = (*Buffer).dec_struct_group p.size = size_struct_group } } case reflect.Slice: switch t2 := t1.Elem(); t2.Kind() { default: logNoSliceEnc(t1, t2) break case reflect.Bool: if p.Packed { p.enc = (*Buffer).enc_slice_packed_bool p.size = size_slice_packed_bool } else { p.enc = (*Buffer).enc_slice_bool p.size = size_slice_bool } p.dec = (*Buffer).dec_slice_bool p.packedDec = (*Buffer).dec_slice_packed_bool case reflect.Int32: if p.Packed { p.enc = (*Buffer).enc_slice_packed_int32 p.size = size_slice_packed_int32 } else { p.enc = (*Buffer).enc_slice_int32 p.size = size_slice_int32 } p.dec = (*Buffer).dec_slice_int32 p.packedDec = (*Buffer).dec_slice_packed_int32 case reflect.Uint32: if p.Packed { p.enc = (*Buffer).enc_slice_packed_uint32 p.size = size_slice_packed_uint32 } else { p.enc = (*Buffer).enc_slice_uint32 p.size = size_slice_uint32 } p.dec = (*Buffer).dec_slice_int32 p.packedDec = (*Buffer).dec_slice_packed_int32 case reflect.Int64, reflect.Uint64: if p.Packed { p.enc = (*Buffer).enc_slice_packed_int64 p.size = size_slice_packed_int64 } else { p.enc = (*Buffer).enc_slice_int64 p.size = size_slice_int64 } p.dec = (*Buffer).dec_slice_int64 p.packedDec = 
(*Buffer).dec_slice_packed_int64 case reflect.Uint8: p.enc = (*Buffer).enc_slice_byte p.dec = (*Buffer).dec_slice_byte p.size = size_slice_byte // This is a []byte, which is either a bytes field, // or the value of a map field. In the latter case, // we always encode an empty []byte, so we should not // use the proto3 enc/size funcs. // f == nil iff this is the key/value of a map field. if p.proto3 && f != nil { p.enc = (*Buffer).enc_proto3_slice_byte p.size = size_proto3_slice_byte } case reflect.Float32, reflect.Float64: switch t2.Bits() { case 32: // can just treat them as bits if p.Packed { p.enc = (*Buffer).enc_slice_packed_uint32 p.size = size_slice_packed_uint32 } else { p.enc = (*Buffer).enc_slice_uint32 p.size = size_slice_uint32 } p.dec = (*Buffer).dec_slice_int32 p.packedDec = (*Buffer).dec_slice_packed_int32 case 64: // can just treat them as bits if p.Packed { p.enc = (*Buffer).enc_slice_packed_int64 p.size = size_slice_packed_int64 } else { p.enc = (*Buffer).enc_slice_int64 p.size = size_slice_int64 } p.dec = (*Buffer).dec_slice_int64 p.packedDec = (*Buffer).dec_slice_packed_int64 default: logNoSliceEnc(t1, t2) break } case reflect.String: p.enc = (*Buffer).enc_slice_string p.dec = (*Buffer).dec_slice_string p.size = size_slice_string case reflect.Ptr: switch t3 := t2.Elem(); t3.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) break case reflect.Struct: p.stype = t2.Elem() p.isMarshaler = isMarshaler(t2) p.isUnmarshaler = isUnmarshaler(t2) if p.Wire == "bytes" { p.enc = (*Buffer).enc_slice_struct_message p.dec = (*Buffer).dec_slice_struct_message p.size = size_slice_struct_message } else { p.enc = (*Buffer).enc_slice_struct_group p.dec = (*Buffer).dec_slice_struct_group p.size = size_slice_struct_group } } case reflect.Slice: switch t2.Elem().Kind() { default: fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) break case reflect.Uint8: p.enc = (*Buffer).enc_slice_slice_byte p.dec = (*Buffer).dec_slice_slice_byte p.size = size_slice_slice_byte } } case reflect.Map: p.enc = (*Buffer).enc_new_map p.dec = (*Buffer).dec_new_map p.size = size_new_map p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) p.mvalprop = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } // precalculate tag code wire := p.WireType if p.Packed { wire = WireBytes } x := uint32(p.Tag)<<3 | uint32(wire) i := 0 for i = 0; x > 127; i++ { p.tagbuf[i] = 0x80 | uint8(x&0x7F) x >>= 7 } p.tagbuf[i] = uint8(x) p.tagcode = p.tagbuf[0 : i+1] if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) } else { p.sprop = getPropertiesLocked(p.stype) } } } var ( marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() ) // isMarshaler reports whether type t implements Marshaler. func isMarshaler(t reflect.Type) bool { // We're checking for (likely) pointer-receiver methods // so if t is not a pointer, something is very wrong. // The calls above only invoke isMarshaler on pointer types. 
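// Note (illustrative): the large switch in setEncAndDec above picks a matched
// (enc, dec, size) triple per Go kind. For a proto2 optional int32 field,
// declared as *int32 in the generated struct, it selects
//
//	p.enc  = (*Buffer).enc_int32
//	p.dec  = (*Buffer).dec_int32
//	p.size = size_int32
//
// Unsigned 32-bit and float32 fields reuse the int32 decoder because decoding
// only moves raw bits, while encoding and sizing keep their own variants.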
if t.Kind() != reflect.Ptr { panic("proto: misuse of isMarshaler") } return t.Implements(marshalerType) } // isUnmarshaler reports whether type t implements Unmarshaler. func isUnmarshaler(t reflect.Type) bool { // We're checking for (likely) pointer-receiver methods // so if t is not a pointer, something is very wrong. // The calls above only invoke isUnmarshaler on pointer types. if t.Kind() != reflect.Ptr { panic("proto: misuse of isUnmarshaler") } return t.Implements(unmarshalerType) } // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) } func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name if f != nil { p.field = toField(f) } if tag == "" { return } p.Parse(tag) p.setEncAndDec(typ, f, lockGetProp) } var ( propertiesMu sync.RWMutex propertiesMap = make(map[reflect.Type]*StructProperties) ) // GetProperties returns the list of properties for the type represented by t. // t must represent a generated struct type of a protocol message. func GetProperties(t reflect.Type) *StructProperties { if t.Kind() != reflect.Struct { panic("proto: type must have kind struct") } // Most calls to GetProperties in a long-running program will be // retrieving details for types we have seen before. propertiesMu.RLock() sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { if collectStats { stats.Chit++ } return sprop } propertiesMu.Lock() sprop = getPropertiesLocked(t) propertiesMu.Unlock() return sprop } // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { if collectStats { stats.Chit++ } return prop } if collectStats { stats.Cmiss++ } prop := new(StructProperties) // in case of recursive protos, fill this in now. propertiesMap[t] = prop // build properties prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) for i := 0; i < t.NumField(); i++ { f := t.Field(i) p := new(Properties) name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) if f.Name == "XXX_extensions" { // special case p.enc = (*Buffer).enc_map p.dec = nil // not needed p.size = size_map } if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } prop.Prop[i] = p prop.order[i] = i if debug { print(i, " ", f.Name, " ", t.String(), " ") if p.Tag > 0 { print(p.String()) } print("\n") } if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } // Re-order prop.order. sort.Sort(prop) // build required counts // build tags reqCount := 0 prop.decoderOrigNames = make(map[string]int) for i, p := range prop.Prop { if strings.HasPrefix(p.Name, "XXX_") { // Internal fields should not appear in tags/origNames maps. // They are handled specially when encoding and decoding. continue } if p.Required { reqCount++ } prop.decoderTags.put(p.Tag, i) prop.decoderOrigNames[p.OrigName] = i } prop.reqCount = reqCount return prop } // Return the Properties object for the x[0]'th field of the structure. 
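// Illustrative sketch (pb is a hypothetical generated message pointer): the
// usual entry point is GetProperties on the struct type, which serves repeat
// callers from propertiesMap under an RLock and only builds once per type.
//
//	t := reflect.TypeOf(pb).Elem() // must have kind struct
//	sprop := GetProperties(t)      // cached *StructProperties
//	_ = sprop.Prop                 // per-field encoders/decoders, in field order
//
// Recursive message types work because getPropertiesLocked publishes the
// still-empty StructProperties into the map before filling it in.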
func propByIndex(t reflect.Type, x []int) *Properties { if len(x) != 1 { fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) return nil } prop := GetProperties(t) return prop.Prop[x[0]] } // Get the address and type of a pointer to a struct from an interface. func getbase(pb Message) (t reflect.Type, b structPointer, err error) { if pb == nil { err = ErrNil return } // get the reflect type of the pointer to the struct. t = reflect.TypeOf(pb) // get the address of the struct. value := reflect.ValueOf(pb) b = toStructPointer(value) return } // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. var enumValueMaps = make(map[string]map[string]int32) // RegisterEnum is called from the generated code to install the enum descriptor // maps into the global table to aid parsing text format protocol buffers. func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { if _, ok := enumValueMaps[typeName]; ok { panic("proto: duplicate enum registered: " + typeName) } enumValueMaps[typeName] = valueMap } ================================================ FILE: vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go ================================================ // Code generated by protoc-gen-go. // source: proto3_proto/proto3.proto // DO NOT EDIT! /* Package proto3_proto is a generated protocol buffer package. It is generated from these files: proto3_proto/proto3.proto It has these top-level messages: Message Nested MessageWithMap */ package proto3_proto import proto "github.com/golang/protobuf/proto" import testdata "github.com/golang/protobuf/proto/testdata" // Reference imports to suppress errors if they are not otherwise used. 
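// Note (illustrative): RegisterEnum above is what generated files call from
// their init functions; this generated file does exactly that at the bottom:
//
//	proto.RegisterEnum("proto3_proto.Message_Humour",
//		Message_Humour_name, Message_Humour_value)
//
// Registering the same fully qualified enum name twice panics, which surfaces
// accidental double-vendoring of a generated package at startup.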
var _ = proto.Marshal type Message_Humour int32 const ( Message_UNKNOWN Message_Humour = 0 Message_PUNS Message_Humour = 1 Message_SLAPSTICK Message_Humour = 2 Message_BILL_BAILEY Message_Humour = 3 ) var Message_Humour_name = map[int32]string{ 0: "UNKNOWN", 1: "PUNS", 2: "SLAPSTICK", 3: "BILL_BAILEY", } var Message_Humour_value = map[string]int32{ "UNKNOWN": 0, "PUNS": 1, "SLAPSTICK": 2, "BILL_BAILEY": 3, } func (x Message_Humour) String() string { return proto.EnumName(Message_Humour_name, int32(x)) } type Message struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (m *Message) GetNested() *Nested { if m != nil { return m.Nested } return nil } func (m *Message) GetTerrain() map[string]*Nested { if m != nil { return m.Terrain } return nil } func (m *Message) GetProto2Field() *testdata.SubDefaults { if m != nil { return m.Proto2Field } return nil } func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { if m != nil { return m.Proto2Value } return nil } type Nested struct { Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` } func (m *Nested) Reset() { *m = Nested{} } func (m *Nested) String() string { return proto.CompactTextString(m) } func (*Nested) ProtoMessage() {} type MessageWithMap struct { ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } func (*MessageWithMap) ProtoMessage() {} func (m *MessageWithMap) GetByteMapping() map[bool][]byte { if m != nil { return m.ByteMapping } return nil } func init() { proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) } ================================================ FILE: vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2014 The Go Authors. All rights reserved. 
// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; import "testdata/test.proto"; package proto3_proto; message Message { enum Humour { UNKNOWN = 0; PUNS = 1; SLAPSTICK = 2; BILL_BAILEY = 3; } string name = 1; Humour hilarity = 2; uint32 height_in_cm = 3; bytes data = 4; int64 result_count = 7; bool true_scotsman = 8; float score = 9; repeated uint64 key = 5; Nested nested = 6; map<string, Nested> terrain = 10; testdata.SubDefaults proto2_field = 11; map<string, testdata.SubDefaults> proto2_value = 13; } message Nested { string bunny = 1; } message MessageWithMap { map<bool, bytes> byte_mapping = 1; } ================================================ FILE: vendor/github.com/golang/protobuf/proto/text.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto // Functions for writing the text protocol buffer format. import ( "bufio" "bytes" "encoding" "fmt" "io" "log" "math" "os" "reflect" "sort" "strings" ) var ( newline = []byte("\n") spaces = []byte(" ") gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} backslashT = []byte{'\\', 't'} backslashDQ = []byte{'\\', '"'} backslashBS = []byte{'\\', '\\'} posInf = []byte("inf") negInf = []byte("-inf") nan = []byte("nan") ) type writer interface { io.Writer WriteByte(byte) error } // textWriter is an io.Writer that tracks its indentation level. type textWriter struct { ind int complete bool // if the current position is a complete line compact bool // whether to write out as a one-liner w writer } func (w *textWriter) WriteString(s string) (n int, err error) { if !strings.Contains(s, "\n") { if !w.compact && w.complete { w.writeIndent() } w.complete = false return io.WriteString(w.w, s) } // WriteString is typically called without newlines, so this // codepath and its copy are rare. We copy to avoid // duplicating all of Write's logic here. return w.Write([]byte(s)) } func (w *textWriter) Write(p []byte) (n int, err error) { newlines := bytes.Count(p, newline) if newlines == 0 { if !w.compact && w.complete { w.writeIndent() } n, err = w.w.Write(p) w.complete = false return n, err } frags := bytes.SplitN(p, newline, newlines+1) if w.compact { for i, frag := range frags { if i > 0 { if err := w.w.WriteByte(' '); err != nil { return n, err } n++ } nn, err := w.w.Write(frag) n += nn if err != nil { return n, err } } return n, nil } for i, frag := range frags { if w.complete { w.writeIndent() } nn, err := w.w.Write(frag) n += nn if err != nil { return n, err } if i+1 < len(frags) { if err := w.w.WriteByte('\n'); err != nil { return n, err } n++ } } w.complete = len(frags[len(frags)-1]) == 0 return n, nil } func (w *textWriter) WriteByte(c byte) error { if w.compact && c == '\n' { c = ' ' } if !w.compact && w.complete { w.writeIndent() } err := w.w.WriteByte(c) w.complete = c == '\n' return err } func (w *textWriter) indent() { w.ind++ } func (w *textWriter) unindent() { if w.ind == 0 { log.Printf("proto: textWriter unindented too far") return } w.ind-- } func writeName(w *textWriter, props *Properties) error { if _, err := w.WriteString(props.OrigName); err != nil { return err } if props.Wire != "group" { return w.WriteByte(':') } return nil } var ( messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() ) // raw is the interface satisfied by RawMessage. 
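// Note (illustrative): textWriter above is the piece that lets MarshalText and
// CompactTextString share one code path. With w.compact set, newlines written
// through WriteByte become single spaces and indentation is skipped, so the
// same writeStruct logic emits either an indented multi-line block or a
// one-liner roughly like
//
//	name:"x" nested:<bunny:"y">
//
// for the Message/Nested types defined in the generated file above, depending
// only on w.compact.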
type raw interface { Bytes() []byte } func writeStruct(w *textWriter, sv reflect.Value) error { if sv.Type() == messageSetType { return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { fv := sv.Field(i) props := sprops.Prop[i] name := st.Field(i).Name if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte // XXX_extensions map[int32]proto.Extension // The first is handled here; // the second is handled at the bottom of this function. if name == "XXX_unrecognized" && !fv.IsNil() { if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { return err } } continue } if fv.Kind() == reflect.Ptr && fv.IsNil() { // Field not filled in. This could be an optional field or // a required field that wasn't filled in. Either way, there // isn't anything we can show for it. continue } if fv.Kind() == reflect.Slice && fv.IsNil() { // Repeated field that is empty, or a bytes field that is unused. continue } if props.Repeated && fv.Kind() == reflect.Slice { // Repeated field. for j := 0; j < fv.Len(); j++ { if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } v := fv.Index(j) if v.Kind() == reflect.Ptr && v.IsNil() { // A nil message in a repeated field is not valid, // but we can handle that more gracefully than panicking. if _, err := w.Write([]byte("\n")); err != nil { return err } continue } if err := writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if fv.Kind() == reflect.Map { // Map fields are rendered as a repeated struct with key/value fields. keys := fv.MapKeys() // TODO: should we sort these for deterministic output? sort.Sort(mapKeys(keys)) for _, key := range keys { val := fv.MapIndex(key) if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } // open struct if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() // key if _, err := w.WriteString("key:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } // nil values aren't legal, but we can avoid panicking because of them. 
if val.Kind() != reflect.Ptr || !val.IsNil() { // value if _, err := w.WriteString("value:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } // close struct w.unindent() if err := w.WriteByte('>'); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { // empty bytes field continue } if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { // proto3 non-repeated scalar field; skip if zero value switch fv.Kind() { case reflect.Bool: if !fv.Bool() { continue } case reflect.Int32, reflect.Int64: if fv.Int() == 0 { continue } case reflect.Uint32, reflect.Uint64: if fv.Uint() == 0 { continue } case reflect.Float32, reflect.Float64: if fv.Float() == 0 { continue } case reflect.String: if fv.String() == "" { continue } } } if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if b, ok := fv.Interface().(raw); ok { if err := writeRaw(w, b.Bytes()); err != nil { return err } continue } // Enums have a String method, so writeAny will work fine. if err := writeAny(w, fv, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } // Extensions (the XXX_extensions field). pv := sv.Addr() if pv.Type().Implements(extendableProtoType) { if err := writeExtensions(w, pv); err != nil { return err } } return nil } // writeRaw writes an uninterpreted raw message. func writeRaw(w *textWriter, b []byte) error { if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if err := writeUnknownStruct(w, b); err != nil { return err } w.unindent() if err := w.WriteByte('>'); err != nil { return err } return nil } // writeAny writes an arbitrary field. func writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) // Floats have special cases. if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { x := v.Float() var b []byte switch { case math.IsInf(x, 1): b = posInf case math.IsInf(x, -1): b = negInf case math.IsNaN(x): b = nan } if b != nil { _, err := w.Write(b) return err } // Other values are handled below. } // We don't attempt to serialise every possible value type; only those // that can occur in protocol buffers. switch v.Kind() { case reflect.Slice: // Should only be a []byte; repeated fields are handled in writeStruct. if err := writeString(w, string(v.Interface().([]byte))); err != nil { return err } case reflect.String: if err := writeString(w, v.String()); err != nil { return err } case reflect.Struct: // Required/optional group/message. var bra, ket byte = '<', '>' if props != nil && props.Wire == "group" { bra, ket = '{', '}' } if err := w.WriteByte(bra); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if tm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } } else if err := writeStruct(w, v); err != nil { return err } w.unindent() if err := w.WriteByte(ket); err != nil { return err } default: _, err := fmt.Fprint(w, v.Interface()) return err } return nil } // equivalent to C's isprint. 
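// A tiny in-package sketch of the escaping performed by writeString below;
// the input string is an illustrative assumption. Non-printable bytes
// (anything isprint rejects) come out as three-digit octal escapes, so
// multi-byte UTF-8 such as 'é' is written byte by byte.
func exampleWriteString() string {
	var buf bytes.Buffer
	w := &textWriter{w: &buf}
	_ = writeString(w, "h\x01i\né")
	return buf.String() // `"h\001i\n\303\251"`
}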
func isprint(c byte) bool { return c >= 0x20 && c < 0x7f } // writeString writes a string in the protocol buffer text format. // It is similar to strconv.Quote except we don't use Go escape sequences, // we treat the string as a byte sequence, and we use octal escapes. // These differences are to maintain interoperability with the other // languages' implementations of the text format. func writeString(w *textWriter, s string) error { // use WriteByte here to get any needed indent if err := w.WriteByte('"'); err != nil { return err } // Loop over the bytes, not the runes. for i := 0; i < len(s); i++ { var err error // Divergence from C++: we don't escape apostrophes. // There's no need to escape them, and the C++ parser // copes with a naked apostrophe. switch c := s[i]; c { case '\n': _, err = w.w.Write(backslashN) case '\r': _, err = w.w.Write(backslashR) case '\t': _, err = w.w.Write(backslashT) case '"': _, err = w.w.Write(backslashDQ) case '\\': _, err = w.w.Write(backslashBS) default: if isprint(c) { err = w.w.WriteByte(c) } else { _, err = fmt.Fprintf(w.w, "\\%03o", c) } } if err != nil { return err } } return w.WriteByte('"') } func writeMessageSet(w *textWriter, ms *MessageSet) error { for _, item := range ms.Item { id := *item.TypeId if msd, ok := messageSetMap[id]; ok { // Known message set type. if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { return err } w.indent() pb := reflect.New(msd.t.Elem()) if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { return err } } else { if err := writeStruct(w, pb.Elem()); err != nil { return err } } } else { // Unknown type. if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { return err } w.indent() if err := writeUnknownStruct(w, item.Message); err != nil { return err } } w.unindent() if _, err := w.Write(gtNewline); err != nil { return err } } return nil } func writeUnknownStruct(w *textWriter, data []byte) (err error) { if !w.compact { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { return err } } b := NewBuffer(data) for b.index < len(b.buf) { x, err := b.DecodeVarint() if err != nil { _, err := fmt.Fprintf(w, "/* %v */\n", err) return err } wire, tag := x&7, x>>3 if wire == WireEndGroup { w.unindent() if _, err := w.Write(endBraceNewline); err != nil { return err } continue } if _, err := fmt.Fprint(w, tag); err != nil { return err } if wire != WireStartGroup { if err := w.WriteByte(':'); err != nil { return err } } if !w.compact || wire == WireStartGroup { if err := w.WriteByte(' '); err != nil { return err } } switch wire { case WireBytes: buf, e := b.DecodeRawBytes(false) if e == nil { _, err = fmt.Fprintf(w, "%q", buf) } else { _, err = fmt.Fprintf(w, "/* %v */", e) } case WireFixed32: x, err = b.DecodeFixed32() err = writeUnknownInt(w, x, err) case WireFixed64: x, err = b.DecodeFixed64() err = writeUnknownInt(w, x, err) case WireStartGroup: err = w.WriteByte('{') w.indent() case WireVarint: x, err = b.DecodeVarint() err = writeUnknownInt(w, x, err) default: _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) } if err != nil { return err } if err = w.WriteByte('\n'); err != nil { return err } } return nil } func writeUnknownInt(w *textWriter, x uint64, err error) error { if err == nil { _, err = fmt.Fprint(w, x) } else { _, err = fmt.Fprintf(w, "/* %v */", err) } return err } type int32Slice []int32 func (s int32Slice) Len() int { return len(s) } func (s int32Slice) Less(i, j 
int) bool { return s[i] < s[j] } func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. func writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] ep := pv.Interface().(extendableProto) // Order the extensions by ID. // This isn't strictly necessary, but it will give us // canonical output, which will also make testing easier. m := ep.ExtensionMap() ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) for _, extNum := range ids { ext := m[extNum] var desc *ExtensionDesc if emap != nil { desc = emap[extNum] } if desc == nil { // Unknown extension. if err := writeUnknownStruct(w, ext.enc); err != nil { return err } continue } pb, err := GetExtension(ep, desc) if err != nil { if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { return err } continue } // Repeated extensions will appear as a slice. if !desc.repeated() { if err := writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } } } return nil } func writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } return nil } func (w *textWriter) writeIndent() { if !w.complete { return } remain := w.ind * 2 for remain > 0 { n := remain if n > len(spaces) { n = len(spaces) } w.w.Write(spaces[:n]) remain -= n } w.complete = false } func marshalText(w io.Writer, pb Message, compact bool) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) return nil } var bw *bufio.Writer ww, ok := w.(writer) if !ok { bw = bufio.NewWriter(w) ww = bw } aw := &textWriter{ w: ww, complete: true, compact: compact, } if tm, ok := pb.(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err } if _, err = aw.Write(text); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) if err := writeStruct(aw, v); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // MarshalText writes a given protocol buffer in text format. // The only errors returned are from w. func MarshalText(w io.Writer, pb Message) error { return marshalText(w, pb, false) } // MarshalTextString is the same as MarshalText, but returns the string directly. func MarshalTextString(pb Message) string { var buf bytes.Buffer marshalText(&buf, pb, false) return buf.String() } // CompactText writes a given protocol buffer in compact text format (one line). func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } // CompactTextString is the same as CompactText, but returns the string directly. 
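// A minimal usage sketch for the text-format entry points defined here
// (MarshalTextString, CompactTextString) and the parser's UnmarshalText.
// The message type is a hypothetical, hand-written stand-in for generated
// code; the field names and the output quoted in comments are assumptions.
type exampleTextMessage struct {
	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count            *int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *exampleTextMessage) Reset()         { *m = exampleTextMessage{} }
func (m *exampleTextMessage) String() string { return CompactTextString(m) }
func (*exampleTextMessage) ProtoMessage()    {}

func exampleTextRoundTrip() error {
	in := &exampleTextMessage{Name: String("gopher"), Count: Int32(3)}
	text := MarshalTextString(in) // roughly "name: \"gopher\"\ncount: 3\n"
	_ = CompactTextString(in)     // same fields, space-separated on one line
	var out exampleTextMessage
	return UnmarshalText(text, &out) // may return *RequiredNotSetError, etc.
}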
func CompactTextString(pb Message) string { var buf bytes.Buffer marshalText(&buf, pb, true) return buf.String() } ================================================ FILE: vendor/github.com/golang/protobuf/proto/text_parser.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto // Functions for parsing the Text protocol buffer format. // TODO: message sets. 
import ( "encoding" "errors" "fmt" "reflect" "strconv" "strings" "unicode/utf8" ) type ParseError struct { Message string Line int // 1-based line number Offset int // 0-based byte offset from start of input } func (p *ParseError) Error() string { if p.Line == 1 { // show offset only for first line return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) } return fmt.Sprintf("line %d: %v", p.Line, p.Message) } type token struct { value string err *ParseError line int // line number offset int // byte number from start of input, not start of line unquoted string // the unquoted version of value, if it was a quoted string } func (t *token) String() string { if t.err == nil { return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) } return fmt.Sprintf("parse error: %v", t.err) } type textParser struct { s string // remaining input done bool // whether the parsing is finished (success or error) backed bool // whether back() was called offset, line int cur token } func newTextParser(s string) *textParser { p := new(textParser) p.s = s p.line = 1 p.cur.line = 1 return p } func (p *textParser) errorf(format string, a ...interface{}) *ParseError { pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} p.cur.err = pe p.done = true return pe } // Numbers and identifiers are matched by [-+._A-Za-z0-9] func isIdentOrNumberChar(c byte) bool { switch { case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': return true case '0' <= c && c <= '9': return true } switch c { case '-', '+', '.', '_': return true } return false } func isWhitespace(c byte) bool { switch c { case ' ', '\t', '\n', '\r': return true } return false } func (p *textParser) skipWhitespace() { i := 0 for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { if p.s[i] == '#' { // comment; skip to end of line or input for i < len(p.s) && p.s[i] != '\n' { i++ } if i == len(p.s) { break } } if p.s[i] == '\n' { p.line++ } i++ } p.offset += i p.s = p.s[i:len(p.s)] if len(p.s) == 0 { p.done = true } } func (p *textParser) advance() { // Skip whitespace p.skipWhitespace() if p.done { return } // Start of non-whitespace p.cur.err = nil p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { case '<', '>', '{', '}', ':', '[', ']', ';', ',': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': // Quoted string i := 1 for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { if p.s[i] == '\\' && i+1 < len(p.s) { // skip escaped char i++ } i++ } if i >= len(p.s) || p.s[i] != p.s[0] { p.errorf("unmatched quote") return } unq, err := unquoteC(p.s[1:i], rune(p.s[0])) if err != nil { p.errorf("invalid quoted string %v", p.s[0:i+1]) return } p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] p.cur.unquoted = unq default: i := 0 for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { i++ } if i == 0 { p.errorf("unexpected byte %#x", p.s[0]) return } p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] } p.offset += len(p.cur.value) } var ( errBadUTF8 = errors.New("proto: bad UTF-8") errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { // This is based on C++'s tokenizer.cc. // Despite its name, this is *not* parsing C syntax. // For instance, "\0" is an invalid quoted string. // Avoid allocation in trivial cases. 
simple := true for _, r := range s { if r == '\\' || r == quote { simple = false break } } if simple { return s, nil } buf := make([]byte, 0, 3*len(s)/2) for len(s) > 0 { r, n := utf8.DecodeRuneInString(s) if r == utf8.RuneError && n == 1 { return "", errBadUTF8 } s = s[n:] if r != '\\' { if r < utf8.RuneSelf { buf = append(buf, byte(r)) } else { buf = append(buf, string(r)...) } continue } ch, tail, err := unescape(s) if err != nil { return "", err } buf = append(buf, ch...) s = tail } return string(buf), nil } func unescape(s string) (ch string, tail string, err error) { r, n := utf8.DecodeRuneInString(s) if r == utf8.RuneError && n == 1 { return "", "", errBadUTF8 } s = s[n:] switch r { case 'a': return "\a", s, nil case 'b': return "\b", s, nil case 'f': return "\f", s, nil case 'n': return "\n", s, nil case 'r': return "\r", s, nil case 't': return "\t", s, nil case 'v': return "\v", s, nil case '?': return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } base := 8 ss := s[:2] s = s[2:] if r == 'x' || r == 'X' { base = 16 } else { ss = string(r) + ss } i, err := strconv.ParseUint(ss, base, 8) if err != nil { return "", "", err } return string([]byte{byte(i)}), s, nil case 'u', 'U': n := 4 if r == 'U' { n = 8 } if len(s) < n { return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) } bs := make([]byte, n/2) for i := 0; i < n; i += 2 { a, ok1 := unhex(s[i]) b, ok2 := unhex(s[i+1]) if !ok1 || !ok2 { return "", "", errBadHex } bs[i/2] = a<<4 | b } s = s[n:] return string(bs), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } // Adapted from src/pkg/strconv/quote.go. func unhex(b byte) (v byte, ok bool) { switch { case '0' <= b && b <= '9': return b - '0', true case 'a' <= b && b <= 'f': return b - 'a' + 10, true case 'A' <= b && b <= 'F': return b - 'A' + 10, true } return 0, false } // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } // Advances the parser and returns the new current token. func (p *textParser) next() *token { if p.backed || p.done { p.backed = false return &p.cur } p.advance() if p.done { p.cur.value = "" } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { // Look for multiple quoted strings separated by whitespace, // and concatenate them. cat := p.cur for { p.skipWhitespace() if p.done || p.s[0] != '"' { break } p.advance() if p.cur.err != nil { return &p.cur } cat.value += " " + p.cur.value cat.unquoted += p.cur.unquoted } p.done = false // parser may have seen EOF, but we want to return cat p.cur = cat } return &p.cur } func (p *textParser) consumeToken(s string) error { tok := p.next() if tok.err != nil { return tok.err } if tok.value != s { p.back() return p.errorf("expected %q, found %q", s, tok.value) } return nil } // Return a RequiredNotSetError indicating which required field was not set. 
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { st := sv.Type() sprops := GetProperties(st) for i := 0; i < st.NumField(); i++ { if !isNil(sv.Field(i)) { continue } props := sprops.Prop[i] if props.Required { return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} } } return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen } // Returns the index in the struct for the named field, as well as the parsed tag properties. func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { sprops := GetProperties(st) i, ok := sprops.decoderOrigNames[name] if ok { return i, sprops.Prop[i], true } return -1, nil, false } // Consume a ':' from the input stream (if the next token is a colon), // returning an error if a colon is needed but not present. func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { tok := p.next() if tok.err != nil { return tok.err } if tok.value != ":" { // Colon is optional when the field is a group or message. needColon := true switch props.Wire { case "group": needColon = false case "bytes": // A "bytes" field is either a message, a string, or a repeated field; // those three become *T, *string and []T respectively, so we can check for // this field being a pointer to a non-string. if typ.Kind() == reflect.Ptr { // *T or *string if typ.Elem().Kind() == reflect.String { break } } else if typ.Kind() == reflect.Slice { // []T or []*T if typ.Elem().Kind() != reflect.Ptr { break } } else if typ.Kind() == reflect.String { // The proto3 exception is for a string field, // which requires a colon. break } needColon = false } if needColon { return p.errorf("expected ':', found %q", tok.value) } p.back() } return nil } func (p *textParser) readStruct(sv reflect.Value, terminator string) error { st := sv.Type() reqCount := GetProperties(st).reqCount var reqFieldErr error fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be // "[extension]". for { tok := p.next() if tok.err != nil { return tok.err } if tok.value == terminator { break } if tok.value == "[" { // Looks like an extension. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). tok = p.next() if tok.err != nil { return tok.err } var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { if d.Name == tok.value { desc = d break } } if desc == nil { return p.errorf("unrecognized extension %q", tok.value) } // Check the extension terminator. tok = p.next() if tok.err != nil { return tok.err } if tok.value != "]" { return p.errorf("unrecognized extension terminator %q", tok.value) } props := &Properties{} props.Parse(desc.Tag) typ := reflect.TypeOf(desc.ExtensionType) if err := p.checkForColon(props, typ); err != nil { return err } rep := desc.repeated() // Read the extension structure, and set it in // the value we're constructing. 
var ext reflect.Value if !rep { ext = reflect.New(typ).Elem() } else { ext = reflect.New(typ.Elem()).Elem() } if err := p.readAny(ext, props); err != nil { if _, ok := err.(*RequiredNotSetError); !ok { return err } reqFieldErr = err } ep := sv.Addr().Interface().(extendableProto) if !rep { SetExtension(ep, desc, ext.Interface()) } else { old, err := GetExtension(ep, desc) var sl reflect.Value if err == nil { sl = reflect.ValueOf(old) // existing slice } else { sl = reflect.MakeSlice(typ, 0, 1) } sl = reflect.Append(sl, ext) SetExtension(ep, desc, sl.Interface()) } } else { // This is a normal, non-extension field. name := tok.value fi, props, ok := structFieldByName(st, name) if !ok { return p.errorf("unknown field name %q in %v", name, st) } dst := sv.Field(fi) if dst.Kind() == reflect.Map { // Consume any colon. if err := p.checkForColon(props, dst.Type()); err != nil { return err } // Construct the map if it doesn't already exist. if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } key := reflect.New(dst.Type().Key()).Elem() val := reflect.New(dst.Type().Elem()).Elem() // The map entry should be this sequence of tokens: // < key : KEY value : VALUE > // Technically the "key" and "value" could come in any order, // but in practice they won't. tok := p.next() var terminator string switch tok.value { case "<": terminator = ">" case "{": terminator = "}" default: return p.errorf("expected '{' or '<', found %q", tok.value) } if err := p.consumeToken("key"); err != nil { return err } if err := p.consumeToken(":"); err != nil { return err } if err := p.readAny(key, props.mkeyprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } if err := p.consumeToken("value"); err != nil { return err } if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { return err } if err := p.readAny(val, props.mvalprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } if err := p.consumeToken(terminator); err != nil { return err } dst.SetMapIndex(key, val) continue } // Check that it's not already set if it's not a repeated field. if !props.Repeated && fieldSet[name] { return p.errorf("non-repeated field %q was repeated", name) } if err := p.checkForColon(props, st.Field(fi).Type); err != nil { return err } // Parse into the field. fieldSet[name] = true if err := p.readAny(dst, props); err != nil { if _, ok := err.(*RequiredNotSetError); !ok { return err } reqFieldErr = err } else if props.Required { reqCount-- } } if err := p.consumeOptionalSeparator(); err != nil { return err } } if reqCount > 0 { return p.missingRequiredFieldError(sv) } return reqFieldErr } // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. func (p *textParser) consumeOptionalSeparator() error { tok := p.next() if tok.err != nil { return tok.err } if tok.value != ";" && tok.value != "," { p.back() } return nil } func (p *textParser) readAny(v reflect.Value, props *Properties) error { tok := p.next() if tok.err != nil { return tok.err } if tok.value == "" { return p.errorf("unexpected EOF") } switch fv := v; fv.Kind() { case reflect.Slice: at := v.Type() if at.Elem().Kind() == reflect.Uint8 { // Special case for []byte if tok.value[0] != '"' && tok.value[0] != '\'' { // Deliberately written out here, as the error after // this switch statement would write "invalid []byte: ...", // which is not as user-friendly. 
return p.errorf("invalid string: %v", tok.value) } bytes := []byte(tok.unquoted) fv.Set(reflect.ValueOf(bytes)) return nil } // Repeated field. May already exist. flen := fv.Len() if flen == fv.Cap() { nav := reflect.MakeSlice(at, flen, 2*flen+1) reflect.Copy(nav, fv) fv.Set(nav) } fv.SetLen(flen + 1) // Read one. p.back() return p.readAny(fv.Index(flen), props) case reflect.Bool: // Either "true", "false", 1 or 0. switch tok.value { case "true", "1": fv.SetBool(true) return nil case "false", "0": fv.SetBool(false) return nil } case reflect.Float32, reflect.Float64: v := tok.value // Ignore 'f' for compatibility with output generated by C++, but don't // remove 'f' when the value is "-inf" or "inf". if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { v = v[:len(v)-1] } if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { fv.SetFloat(f) return nil } case reflect.Int32: if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { fv.SetInt(x) return nil } if len(props.Enum) == 0 { break } m, ok := enumValueMaps[props.Enum] if !ok { break } x, ok := m[tok.value] if !ok { break } fv.SetInt(int64(x)) return nil case reflect.Int64: if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { fv.SetInt(x) return nil } case reflect.Ptr: // A basic field (indirected through pointer), or a repeated message/group p.back() fv.Set(reflect.New(fv.Type().Elem())) return p.readAny(fv.Elem(), props) case reflect.String: if tok.value[0] == '"' || tok.value[0] == '\'' { fv.SetString(tok.unquoted) return nil } case reflect.Struct: var terminator string switch tok.value { case "{": terminator = "}" case "<": terminator = ">" default: return p.errorf("expected '{' or '<', found %q", tok.value) } // TODO: Handle nested messages which implement encoding.TextUnmarshaler. return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { fv.SetUint(uint64(x)) return nil } case reflect.Uint64: if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { fv.SetUint(x) return nil } } return p.errorf("invalid %v: %v", v.Type(), tok.value) } // UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb // before starting to unmarshal, so any existing data in pb is always removed. // If a required field is not set and no other error occurs, // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { err := um.UnmarshalText([]byte(s)) return err } pb.Reset() v := reflect.ValueOf(pb) if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { return pe } return nil } ================================================ FILE: vendor/github.com/gorilla/context/.travis.yml ================================================ language: go go: - 1.0 - 1.1 - 1.2 - 1.3 - 1.4 - tip ================================================ FILE: vendor/github.com/gorilla/context/LICENSE ================================================ Copyright (c) 2012 Rodrigo Moraes. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/gorilla/context/README.md ================================================ context ======= [![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) gorilla/context is a general purpose registry for global request variables. Read the full documentation here: http://www.gorillatoolkit.org/pkg/context ================================================ FILE: vendor/github.com/gorilla/context/context.go ================================================ // Copyright 2012 The Gorilla Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package context import ( "net/http" "sync" "time" ) var ( mutex sync.RWMutex data = make(map[*http.Request]map[interface{}]interface{}) datat = make(map[*http.Request]int64) ) // Set stores a value for a given key in a given request. func Set(r *http.Request, key, val interface{}) { mutex.Lock() if data[r] == nil { data[r] = make(map[interface{}]interface{}) datat[r] = time.Now().Unix() } data[r][key] = val mutex.Unlock() } // Get returns a value stored for a given key in a given request. func Get(r *http.Request, key interface{}) interface{} { mutex.RLock() if ctx := data[r]; ctx != nil { value := ctx[key] mutex.RUnlock() return value } mutex.RUnlock() return nil } // GetOk returns stored value and presence state like multi-value return of map access. func GetOk(r *http.Request, key interface{}) (interface{}, bool) { mutex.RLock() if _, ok := data[r]; ok { value, ok := data[r][key] mutex.RUnlock() return value, ok } mutex.RUnlock() return nil, false } // GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. func GetAll(r *http.Request) map[interface{}]interface{} { mutex.RLock() if context, ok := data[r]; ok { result := make(map[interface{}]interface{}, len(context)) for k, v := range context { result[k] = v } mutex.RUnlock() return result } mutex.RUnlock() return nil } // GetAllOk returns all stored values for the request as a map and a boolean value that indicates if // the request was registered. 
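// A minimal in-package sketch of the API above: a wrapping handler stores a
// request-scoped value with Set, a downstream handler reads it back with
// GetOk, and ClearHandler (below) drops the entry when the request finishes.
// The key and handler names are illustrative assumptions.
type exampleKey int

const exampleUserKey exampleKey = 0

func exampleMiddleware(next http.Handler) http.Handler {
	return ClearHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		Set(r, exampleUserKey, "gopher") // e.g. a value derived from the request
		next.ServeHTTP(w, r)
	}))
}

func exampleHandler(w http.ResponseWriter, r *http.Request) {
	if user, ok := GetOk(r, exampleUserKey); ok {
		w.Write([]byte("user: " + user.(string)))
	}
}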
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { mutex.RLock() context, ok := data[r] result := make(map[interface{}]interface{}, len(context)) for k, v := range context { result[k] = v } mutex.RUnlock() return result, ok } // Delete removes a value stored for a given key in a given request. func Delete(r *http.Request, key interface{}) { mutex.Lock() if data[r] != nil { delete(data[r], key) } mutex.Unlock() } // Clear removes all values stored for a given request. // // This is usually called by a handler wrapper to clean up request // variables at the end of a request lifetime. See ClearHandler(). func Clear(r *http.Request) { mutex.Lock() clear(r) mutex.Unlock() } // clear is Clear without the lock. func clear(r *http.Request) { delete(data, r) delete(datat, r) } // Purge removes request data stored for longer than maxAge, in seconds. // It returns the amount of requests removed. // // If maxAge <= 0, all request data is removed. // // This is only used for sanity check: in case context cleaning was not // properly set some request data can be kept forever, consuming an increasing // amount of memory. In case this is detected, Purge() must be called // periodically until the problem is fixed. func Purge(maxAge int) int { mutex.Lock() count := 0 if maxAge <= 0 { count = len(data) data = make(map[*http.Request]map[interface{}]interface{}) datat = make(map[*http.Request]int64) } else { min := time.Now().Unix() - int64(maxAge) for r := range data { if datat[r] < min { clear(r) count++ } } } mutex.Unlock() return count } // ClearHandler wraps an http.Handler and clears request values at the end // of a request lifetime. func ClearHandler(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer Clear(r) h.ServeHTTP(w, r) }) } ================================================ FILE: vendor/github.com/gorilla/context/doc.go ================================================ // Copyright 2012 The Gorilla Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package context stores values shared during a request lifetime. For example, a router can set variables extracted from the URL and later application handlers can access those values, or it can be used to store sessions values to be saved at the end of a request. There are several others common uses. The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 Here's the basic usage: first define the keys that you will need. The key type is interface{} so a key can be of any type that supports equality. Here we define a key using a custom int type to avoid name collisions: package foo import ( "github.com/gorilla/context" ) type key int const MyKey key = 0 Then set a variable. Variables are bound to an http.Request object, so you need a request instance to set a value: context.Set(r, MyKey, "bar") The application can later access the variable using the same key you provided: func MyHandler(w http.ResponseWriter, r *http.Request) { // val is "bar". val := context.Get(r, foo.MyKey) // returns ("bar", true) val, ok := context.GetOk(r, foo.MyKey) // ... } And that's all about the basic usage. We discuss some other ideas below. Any type can be stored in the context. 
To enforce a given type, make the key private and wrap Get() and Set() to accept and return values of a specific type: type key int const mykey key = 0 // GetMyKey returns a value for this package from the request values. func GetMyKey(r *http.Request) SomeType { if rv := context.Get(r, mykey); rv != nil { return rv.(SomeType) } return nil } // SetMyKey sets a value for this package in the request values. func SetMyKey(r *http.Request, val SomeType) { context.Set(r, mykey, val) } Variables must be cleared at the end of a request, to remove all values that were stored. This can be done in an http.Handler, after a request was served. Just call Clear() passing the request: context.Clear(r) ...or use ClearHandler(), which conveniently wraps an http.Handler to clear variables at the end of a request lifetime. The Routers from the packages gorilla/mux and gorilla/pat call Clear() so if you are using either of them you don't need to clear the context manually. */ package context ================================================ FILE: vendor/github.com/gorilla/mux/.travis.yml ================================================ language: go go: - 1.0 - 1.1 - 1.2 - tip ================================================ FILE: vendor/github.com/gorilla/mux/LICENSE ================================================ Copyright (c) 2012 Rodrigo Moraes. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/gorilla/mux/README.md ================================================ mux === [![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) gorilla/mux is a powerful URL router and dispatcher. Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux ================================================ FILE: vendor/github.com/gorilla/mux/doc.go ================================================ // Copyright 2012 The Gorilla Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package gorilla/mux implements a request router and dispatcher. 
The name mux stands for "HTTP request multiplexer". Like the standard http.ServeMux, mux.Router matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: * Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. * URL hosts and paths can have variables with an optional regular expression. * Registered URLs can be built, or "reversed", which helps maintaining references to resources. * Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. * It implements the http.Handler interface so it is compatible with the standard http.ServeMux. Let's start registering a couple of URL paths and handlers: func main() { r := mux.NewRouter() r.HandleFunc("/", HomeHandler) r.HandleFunc("/products", ProductsHandler) r.HandleFunc("/articles", ArticlesHandler) http.Handle("/", r) } Here we register three routes mapping URL paths to handlers. This is equivalent to how http.HandleFunc() works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (http.ResponseWriter, *http.Request) as parameters. Paths can have variables. They are defined using the format {name} or {name:pattern}. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: r := mux.NewRouter() r.HandleFunc("/products/{key}", ProductHandler) r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) The names are used to create a map of route variables which can be retrieved calling mux.Vars(): vars := mux.Vars(request) category := vars["category"] And this is all you need to know about the basic usage. More advanced options are explained below. Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: r := mux.NewRouter() // Only matches if domain is "www.domain.com". r.Host("www.domain.com") // Matches a dynamic subdomain. r.Host("{subdomain:[a-z]+}.domain.com") There are several other matchers that can be added. To match path prefixes: r.PathPrefix("/products/") ...or HTTP methods: r.Methods("GET", "POST") ...or URL schemes: r.Schemes("https") ...or header values: r.Headers("X-Requested-With", "XMLHttpRequest") ...or query values: r.Queries("key", "value") ...or to use a custom matcher function: r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { return r.ProtoMajor == 0 }) ...and finally, it is possible to combine several matchers in a single route: r.HandleFunc("/products", ProductsHandler). Host("www.domain.com"). Methods("GET"). Schemes("http") Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". For example, let's say we have several URLs that should only match when the host is "www.domain.com". 
Create a route for that host and get a "subrouter" from it: r := mux.NewRouter() s := r.Host("www.domain.com").Subrouter() Then register routes in the subrouter: s.HandleFunc("/products/", ProductsHandler) s.HandleFunc("/products/{key}", ProductHandler) s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) The three URL paths we registered above will only be tested if the domain is "www.domain.com", because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: r := mux.NewRouter() s := r.PathPrefix("/products").Subrouter() // "/products/" s.HandleFunc("/", ProductsHandler) // "/products/{key}/" s.HandleFunc("/{key}/", ProductHandler) // "/products/{key}/details" s.HandleFunc("/{key}/details", ProductDetailsHandler) Now let's see how to build registered URLs. Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling Name() on a route. For example: r := mux.NewRouter() r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). Name("article") To build a URL, get the route and call the URL() method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: url, err := r.Get("article").URL("category", "technology", "id", "42") ...and the result will be a url.URL with the following path: "/articles/technology/42" This also works for host variables: r := mux.NewRouter() r.Host("{subdomain}.domain.com"). Path("/articles/{category}/{id:[0-9]+}"). HandlerFunc(ArticleHandler). Name("article") // url.String() will be "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", "category", "technology", "id", "42") All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. There's also a way to build only the URL host or path for a route: use the methods URLHost() or URLPath() instead. For the previous route, we would do: // "http://news.domain.com/" host, err := r.Get("article").URLHost("subdomain", "news") // "/articles/technology/42" path, err := r.Get("article").URLPath("category", "technology", "id", "42") And if you use subrouters, host and path defined separately can be built as well: r := mux.NewRouter() s := r.Host("{subdomain}.domain.com").Subrouter() s.Path("/articles/{category}/{id:[0-9]+}"). HandlerFunc(ArticleHandler). Name("article") // "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", "category", "technology", "id", "42") */ package mux ================================================ FILE: vendor/github.com/gorilla/mux/mux.go ================================================ // Copyright 2012 The Gorilla Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package mux import ( "fmt" "net/http" "path" "github.com/gorilla/context" ) // NewRouter returns a new router instance. func NewRouter() *Router { return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} } // Router registers routes to be matched and dispatches a handler. // // It implements the http.Handler interface, so it can be registered to serve // requests: // // var router = mux.NewRouter() // // func main() { // http.Handle("/", router) // } // // Or, for Google App Engine, register it in a init() function: // // func init() { // http.Handle("/", router) // } // // This will send all incoming requests to the router. type Router struct { // Configurable Handler to be used when no route matches. NotFoundHandler http.Handler // Parent route, if this is a subrouter. parent parentRoute // Routes to be matched, in order. routes []*Route // Routes by name for URL building. namedRoutes map[string]*Route // See Router.StrictSlash(). This defines the flag for new routes. strictSlash bool // If true, do not clear the request context after handling the request KeepContext bool } // Match matches registered routes against the request. func (r *Router) Match(req *http.Request, match *RouteMatch) bool { for _, route := range r.routes { if route.Match(req, match) { return true } } return false } // ServeHTTP dispatches the handler registered in the matched route. // // When there is a match, the route variables can be retrieved calling // mux.Vars(request). func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { // Clean path to canonical form and redirect. if p := cleanPath(req.URL.Path); p != req.URL.Path { // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: // http://code.google.com/p/go/issues/detail?id=5252 url := *req.URL url.Path = p p = url.String() w.Header().Set("Location", p) w.WriteHeader(http.StatusMovedPermanently) return } var match RouteMatch var handler http.Handler if r.Match(req, &match) { handler = match.Handler setVars(req, match.Vars) setCurrentRoute(req, match.Route) } if handler == nil { handler = r.NotFoundHandler if handler == nil { handler = http.NotFoundHandler() } } if !r.KeepContext { defer context.Clear(req) } handler.ServeHTTP(w, req) } // Get returns a route registered with the given name. func (r *Router) Get(name string) *Route { return r.getNamedRoutes()[name] } // GetRoute returns a route registered with the given name. This method // was renamed to Get() and remains here for backwards compatibility. func (r *Router) GetRoute(name string) *Route { return r.getNamedRoutes()[name] } // StrictSlash defines the trailing slash behavior for new routes. The initial // value is false. // // When true, if the route path is "/path/", accessing "/path" will redirect // to the former and vice versa. In other words, your application will always // see the path as specified in the route. // // When false, if the route path is "/path", accessing "/path/" will not match // this route and vice versa. // // Special case: when a route sets a path prefix using the PathPrefix() method, // strict slash is ignored for that route because the redirect behavior can't // be determined from a prefix alone. However, any subrouters created from that // route inherit the original StrictSlash setting. 
func (r *Router) StrictSlash(value bool) *Router { r.strictSlash = value return r } // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- // getNamedRoutes returns the map where named routes are registered. func (r *Router) getNamedRoutes() map[string]*Route { if r.namedRoutes == nil { if r.parent != nil { r.namedRoutes = r.parent.getNamedRoutes() } else { r.namedRoutes = make(map[string]*Route) } } return r.namedRoutes } // getRegexpGroup returns regexp definitions from the parent route, if any. func (r *Router) getRegexpGroup() *routeRegexpGroup { if r.parent != nil { return r.parent.getRegexpGroup() } return nil } func (r *Router) buildVars(m map[string]string) map[string]string { if r.parent != nil { m = r.parent.buildVars(m) } return m } // ---------------------------------------------------------------------------- // Route factories // ---------------------------------------------------------------------------- // NewRoute registers an empty route. func (r *Router) NewRoute() *Route { route := &Route{parent: r, strictSlash: r.strictSlash} r.routes = append(r.routes, route) return route } // Handle registers a new route with a matcher for the URL path. // See Route.Path() and Route.Handler(). func (r *Router) Handle(path string, handler http.Handler) *Route { return r.NewRoute().Path(path).Handler(handler) } // HandleFunc registers a new route with a matcher for the URL path. // See Route.Path() and Route.HandlerFunc(). func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, *http.Request)) *Route { return r.NewRoute().Path(path).HandlerFunc(f) } // Headers registers a new route with a matcher for request header values. // See Route.Headers(). func (r *Router) Headers(pairs ...string) *Route { return r.NewRoute().Headers(pairs...) } // Host registers a new route with a matcher for the URL host. // See Route.Host(). func (r *Router) Host(tpl string) *Route { return r.NewRoute().Host(tpl) } // MatcherFunc registers a new route with a custom matcher function. // See Route.MatcherFunc(). func (r *Router) MatcherFunc(f MatcherFunc) *Route { return r.NewRoute().MatcherFunc(f) } // Methods registers a new route with a matcher for HTTP methods. // See Route.Methods(). func (r *Router) Methods(methods ...string) *Route { return r.NewRoute().Methods(methods...) } // Path registers a new route with a matcher for the URL path. // See Route.Path(). func (r *Router) Path(tpl string) *Route { return r.NewRoute().Path(tpl) } // PathPrefix registers a new route with a matcher for the URL path prefix. // See Route.PathPrefix(). func (r *Router) PathPrefix(tpl string) *Route { return r.NewRoute().PathPrefix(tpl) } // Queries registers a new route with a matcher for URL query values. // See Route.Queries(). func (r *Router) Queries(pairs ...string) *Route { return r.NewRoute().Queries(pairs...) } // Schemes registers a new route with a matcher for URL schemes. // See Route.Schemes(). func (r *Router) Schemes(schemes ...string) *Route { return r.NewRoute().Schemes(schemes...) } // BuildVars registers a new route with a custom function for modifying // route variables before building a URL. 
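// A brief in-package sketch of the registration helpers and the StrictSlash
// behaviour described above; paths and handlers are illustrative assumptions.
// With StrictSlash(true), a request for "/products" is redirected to the
// registered "/products/" (and vice versa).
func exampleRouterSetup() *Router {
	r := NewRouter().StrictSlash(true)
	r.HandleFunc("/products/", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("products"))
	})
	r.Host("{subdomain:[a-z]+}.domain.com").
		Path("/articles/{category}/{id:[0-9]+}").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			w.Write([]byte(Vars(req)["id"]))
		})
	return r
}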
func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { return r.NewRoute().BuildVarsFunc(f) } // ---------------------------------------------------------------------------- // Context // ---------------------------------------------------------------------------- // RouteMatch stores information about a matched route. type RouteMatch struct { Route *Route Handler http.Handler Vars map[string]string } type contextKey int const ( varsKey contextKey = iota routeKey ) // Vars returns the route variables for the current request, if any. func Vars(r *http.Request) map[string]string { if rv := context.Get(r, varsKey); rv != nil { return rv.(map[string]string) } return nil } // CurrentRoute returns the matched route for the current request, if any. func CurrentRoute(r *http.Request) *Route { if rv := context.Get(r, routeKey); rv != nil { return rv.(*Route) } return nil } func setVars(r *http.Request, val interface{}) { context.Set(r, varsKey, val) } func setCurrentRoute(r *http.Request, val interface{}) { context.Set(r, routeKey, val) } // ---------------------------------------------------------------------------- // Helpers // ---------------------------------------------------------------------------- // cleanPath returns the canonical path for p, eliminating . and .. elements. // Borrowed from the net/http package. func cleanPath(p string) string { if p == "" { return "/" } if p[0] != '/' { p = "/" + p } np := path.Clean(p) // path.Clean removes trailing slash except for root; // put the trailing slash back if necessary. if p[len(p)-1] == '/' && np != "/" { np += "/" } return np } // uniqueVars returns an error if two slices contain duplicated strings. func uniqueVars(s1, s2 []string) error { for _, v1 := range s1 { for _, v2 := range s2 { if v1 == v2 { return fmt.Errorf("mux: duplicated route variable %q", v2) } } } return nil } // mapFromPairs converts variadic string parameters to a string map. func mapFromPairs(pairs ...string) (map[string]string, error) { length := len(pairs) if length%2 != 0 { return nil, fmt.Errorf( "mux: number of parameters must be multiple of 2, got %v", pairs) } m := make(map[string]string, length/2) for i := 0; i < length; i += 2 { m[pairs[i]] = pairs[i+1] } return m, nil } // matchInArray returns true if the given string value is in the array. func matchInArray(arr []string, value string) bool { for _, v := range arr { if v == value { return true } } return false } // matchMap returns true if the given key/value pairs exist in a given map. func matchMap(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { for k, v := range toCheck { // Check if key exists. if canonicalKey { k = http.CanonicalHeaderKey(k) } if values := toMatch[k]; values == nil { return false } else if v != "" { // If value was defined as an empty string we only check that the // key exists. Otherwise we also check for equality. valueExists := false for _, value := range values { if v == value { valueExists = true break } } if !valueExists { return false } } } return true } ================================================ FILE: vendor/github.com/gorilla/mux/regexp.go ================================================ // Copyright 2012 The Gorilla Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package mux import ( "bytes" "fmt" "net/http" "net/url" "regexp" "strings" ) // newRouteRegexp parses a route template and returns a routeRegexp, // used to match a host, a path or a query string. // // It will extract named variables, assemble a regexp to be matched, create // a "reverse" template to build URLs and compile regexps to validate variable // values used in URL building. // // Previously we accepted only Python-like identifiers for variable // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that // name and pattern can't be empty, and names can't contain a colon. func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { // Check if it is well-formed. idxs, errBraces := braceIndices(tpl) if errBraces != nil { return nil, errBraces } // Backup the original. template := tpl // Now let's parse it. defaultPattern := "[^/]+" if matchQuery { defaultPattern = "[^?&]+" matchPrefix = true } else if matchHost { defaultPattern = "[^.]+" matchPrefix = false } // Only match strict slash if not matching if matchPrefix || matchHost || matchQuery { strictSlash = false } // Set a flag for strictSlash. endSlash := false if strictSlash && strings.HasSuffix(tpl, "/") { tpl = tpl[:len(tpl)-1] endSlash = true } varsN := make([]string, len(idxs)/2) varsR := make([]*regexp.Regexp, len(idxs)/2) pattern := bytes.NewBufferString("") if !matchQuery { pattern.WriteByte('^') } reverse := bytes.NewBufferString("") var end int var err error for i := 0; i < len(idxs); i += 2 { // Set all values we are interested in. raw := tpl[end:idxs[i]] end = idxs[i+1] parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) name := parts[0] patt := defaultPattern if len(parts) == 2 { patt = parts[1] } // Name or pattern can't be empty. if name == "" || patt == "" { return nil, fmt.Errorf("mux: missing name or pattern in %q", tpl[idxs[i]:end]) } // Build the regexp pattern. fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) // Build the reverse template. fmt.Fprintf(reverse, "%s%%s", raw) // Append variable name and compiled pattern. varsN[i/2] = name varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) if err != nil { return nil, err } } // Add the remaining. raw := tpl[end:] pattern.WriteString(regexp.QuoteMeta(raw)) if strictSlash { pattern.WriteString("[/]?") } if !matchPrefix { pattern.WriteByte('$') } reverse.WriteString(raw) if endSlash { reverse.WriteByte('/') } // Compile full regexp. reg, errCompile := regexp.Compile(pattern.String()) if errCompile != nil { return nil, errCompile } // Done! return &routeRegexp{ template: template, matchHost: matchHost, matchQuery: matchQuery, strictSlash: strictSlash, regexp: reg, reverse: reverse.String(), varsN: varsN, varsR: varsR, }, nil } // routeRegexp stores a regexp to match a host or path and information to // collect and validate route variables. type routeRegexp struct { // The unmodified template. template string // True for host match, false for path or query string match. matchHost bool // True for query string match, false for path and host match. matchQuery bool // The strictSlash value defined on the route, but disabled if PathPrefix was used. strictSlash bool // Expanded regexp. regexp *regexp.Regexp // Reverse template. reverse string // Variable names. varsN []string // Variable regexps (validators). varsR []*regexp.Regexp } // Match matches the regexp against the URL host or path. 
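// An in-package sketch of how the templates compiled above take part in
// matching; the URLs are illustrative assumptions. "{id:[0-9]+}" becomes a
// capturing group, so a non-numeric id simply fails to match.
func exampleTemplateMatch() {
	r := NewRouter()
	r.Path("/articles/{category}/{id:[0-9]+}")

	reqOK, _ := http.NewRequest("GET", "http://example.com/articles/tech/42", nil)
	reqBad, _ := http.NewRequest("GET", "http://example.com/articles/tech/abc", nil)

	var m RouteMatch
	_ = r.Match(reqOK, &m)  // true; m.Vars holds {"category": "tech", "id": "42"}
	_ = r.Match(reqBad, &m) // false: "abc" does not satisfy [0-9]+
}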
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { if !r.matchHost { if r.matchQuery { return r.regexp.MatchString(req.URL.RawQuery) } else { return r.regexp.MatchString(req.URL.Path) } } return r.regexp.MatchString(getHost(req)) } // url builds a URL part using the given values. func (r *routeRegexp) url(values map[string]string) (string, error) { urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { return "", fmt.Errorf("mux: missing route variable %q", v) } urlValues[k] = value } rv := fmt.Sprintf(r.reverse, urlValues...) if !r.regexp.MatchString(rv) { // The URL is checked against the full regexp, instead of checking // individual variables. This is faster but to provide a good error // message, we check individual regexps if the URL doesn't match. for k, v := range r.varsN { if !r.varsR[k].MatchString(values[v]) { return "", fmt.Errorf( "mux: variable %q doesn't match, expected %q", values[v], r.varsR[k].String()) } } } return rv, nil } // braceIndices returns the first level curly brace indices from a string. // It returns an error in case of unbalanced braces. func braceIndices(s string) ([]int, error) { var level, idx int idxs := make([]int, 0) for i := 0; i < len(s); i++ { switch s[i] { case '{': if level++; level == 1 { idx = i } case '}': if level--; level == 0 { idxs = append(idxs, idx, i+1) } else if level < 0 { return nil, fmt.Errorf("mux: unbalanced braces in %q", s) } } } if level != 0 { return nil, fmt.Errorf("mux: unbalanced braces in %q", s) } return idxs, nil } // ---------------------------------------------------------------------------- // routeRegexpGroup // ---------------------------------------------------------------------------- // routeRegexpGroup groups the route matchers that carry variables. type routeRegexpGroup struct { host *routeRegexp path *routeRegexp queries []*routeRegexp } // setMatch extracts the variables from the URL once a route matches. func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. if v.host != nil { hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) if hostVars != nil { for k, v := range v.host.varsN { m.Vars[v] = hostVars[k+1] } } } // Store path variables. if v.path != nil { pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) if pathVars != nil { for k, v := range v.path.varsN { m.Vars[v] = pathVars[k+1] } // Check if we should redirect. if v.path.strictSlash { p1 := strings.HasSuffix(req.URL.Path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { u, _ := url.Parse(req.URL.String()) if p1 { u.Path = u.Path[:len(u.Path)-1] } else { u.Path += "/" } m.Handler = http.RedirectHandler(u.String(), 301) } } } } // Store query string variables. rawQuery := req.URL.RawQuery for _, q := range v.queries { queryVars := q.regexp.FindStringSubmatch(rawQuery) if queryVars != nil { for k, v := range q.varsN { m.Vars[v] = queryVars[k+1] } } } } // getHost tries its best to return the request host. func getHost(r *http.Request) string { if r.URL.IsAbs() { return r.URL.Host } host := r.Host // Slice off any port information. if i := strings.Index(host, ":"); i != -1 { host = host[:i] } return host } ================================================ FILE: vendor/github.com/gorilla/mux/route.go ================================================ // Copyright 2012 The Gorilla Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package mux import ( "errors" "fmt" "net/http" "net/url" "strings" ) // Route stores information to match a request and build URLs. type Route struct { // Parent where the route was registered (a Router). parent parentRoute // Request handler for the route. handler http.Handler // List of matchers. matchers []matcher // Manager for the variables from host and path. regexp *routeRegexpGroup // If true, when the path pattern is "/path/", accessing "/path" will // redirect to the former and vice versa. strictSlash bool // If true, this route never matches: it is only used to build URLs. buildOnly bool // The name used to build URLs. name string // Error resulted from building a route. err error buildVarsFunc BuildVarsFunc } // Match matches the route against the request. func (r *Route) Match(req *http.Request, match *RouteMatch) bool { if r.buildOnly || r.err != nil { return false } // Match everything. for _, m := range r.matchers { if matched := m.Match(req, match); !matched { return false } } // Yay, we have a match. Let's collect some info about it. if match.Route == nil { match.Route = r } if match.Handler == nil { match.Handler = r.handler } if match.Vars == nil { match.Vars = make(map[string]string) } // Set variables. if r.regexp != nil { r.regexp.setMatch(req, match, r) } return true } // ---------------------------------------------------------------------------- // Route attributes // ---------------------------------------------------------------------------- // GetError returns an error resulted from building the route, if any. func (r *Route) GetError() error { return r.err } // BuildOnly sets the route to never match: it is only used to build URLs. func (r *Route) BuildOnly() *Route { r.buildOnly = true return r } // Handler -------------------------------------------------------------------- // Handler sets a handler for the route. func (r *Route) Handler(handler http.Handler) *Route { if r.err == nil { r.handler = handler } return r } // HandlerFunc sets a handler function for the route. func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { return r.Handler(http.HandlerFunc(f)) } // GetHandler returns the handler for the route, if any. func (r *Route) GetHandler() http.Handler { return r.handler } // Name ----------------------------------------------------------------------- // Name sets the name for the route, used to build URLs. // If the name was registered already it will be overwritten. func (r *Route) Name(name string) *Route { if r.name != "" { r.err = fmt.Errorf("mux: route already has name %q, can't set %q", r.name, name) } if r.err == nil { r.name = name r.getNamedRoutes()[name] = r } return r } // GetName returns the name for the route, if any. func (r *Route) GetName() string { return r.name } // ---------------------------------------------------------------------------- // Matchers // ---------------------------------------------------------------------------- // matcher types try to match a request. type matcher interface { Match(*http.Request, *RouteMatch) bool } // addMatcher adds a matcher to the route. func (r *Route) addMatcher(m matcher) *Route { if r.err == nil { r.matchers = append(r.matchers, m) } return r } // addRegexpMatcher adds a host or path matcher and builder to a route. 
func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { if r.err != nil { return r.err } r.regexp = r.getRegexpGroup() if !matchHost && !matchQuery { if len(tpl) == 0 || tpl[0] != '/' { return fmt.Errorf("mux: path must start with a slash, got %q", tpl) } if r.regexp.path != nil { tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl } } rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) if err != nil { return err } for _, q := range r.regexp.queries { if err = uniqueVars(rr.varsN, q.varsN); err != nil { return err } } if matchHost { if r.regexp.path != nil { if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { return err } } r.regexp.host = rr } else { if r.regexp.host != nil { if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { return err } } if matchQuery { r.regexp.queries = append(r.regexp.queries, rr) } else { r.regexp.path = rr } } r.addMatcher(rr) return nil } // Headers -------------------------------------------------------------------- // headerMatcher matches the request against header values. type headerMatcher map[string]string func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { return matchMap(m, r.Header, true) } // Headers adds a matcher for request header values. // It accepts a sequence of key/value pairs to be matched. For example: // // r := mux.NewRouter() // r.Headers("Content-Type", "application/json", // "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. // // If the value is an empty string, it will match any value if the key is set. func (r *Route) Headers(pairs ...string) *Route { if r.err == nil { var headers map[string]string headers, r.err = mapFromPairs(pairs...) return r.addMatcher(headerMatcher(headers)) } return r } // Host ----------------------------------------------------------------------- // Host adds a matcher for the URL host. // It accepts a template with zero or more URL variables enclosed by {}. // Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next dot. // // - {name:pattern} matches the given regexp pattern. // // For example: // // r := mux.NewRouter() // r.Host("www.domain.com") // r.Host("{subdomain}.domain.com") // r.Host("{subdomain:[a-z]+}.domain.com") // // Variable names must be unique in a given route. They can be retrieved // by calling mux.Vars(request). func (r *Route) Host(tpl string) *Route { r.err = r.addRegexpMatcher(tpl, true, false, false) return r } // MatcherFunc ---------------------------------------------------------------- // MatcherFunc is the function signature used by custom matchers. type MatcherFunc func(*http.Request, *RouteMatch) bool func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { return m(r, match) } // MatcherFunc adds a custom function to be used as request matcher. func (r *Route) MatcherFunc(f MatcherFunc) *Route { return r.addMatcher(f) } // Methods -------------------------------------------------------------------- // methodMatcher matches the request against HTTP methods. type methodMatcher []string func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { return matchInArray(m, r.Method) } // Methods adds a matcher for HTTP methods. // It accepts a sequence of one or more methods to be matched, e.g.: // "GET", "POST", "PUT".
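// A short sketch combining the matchers defined in this file; the host,
// uploadHandler and the content-length check are hypothetical:
//
//	r := mux.NewRouter()
//	r.Host("api.example.com").
//		Methods("POST").
//		Headers("Content-Type", "application/json").
//		MatcherFunc(func(req *http.Request, _ *mux.RouteMatch) bool {
//			return req.ContentLength > 0 // extra check applied on top of the matchers above
//		}).
//		HandlerFunc(uploadHandler) // uploadHandler is assumed to exist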
func (r *Route) Methods(methods ...string) *Route { for k, v := range methods { methods[k] = strings.ToUpper(v) } return r.addMatcher(methodMatcher(methods)) } // Path ----------------------------------------------------------------------- // Path adds a matcher for the URL path. // It accepts a template with zero or more URL variables enclosed by {}. The // template must start with a "/". // Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // // - {name:pattern} matches the given regexp pattern. // // For example: // // r := mux.NewRouter() // r.Path("/products/").Handler(ProductsHandler) // r.Path("/products/{key}").Handler(ProductsHandler) // r.Path("/articles/{category}/{id:[0-9]+}"). // Handler(ArticleHandler) // // Variable names must be unique in a given route. They can be retrieved // by calling mux.Vars(request). func (r *Route) Path(tpl string) *Route { r.err = r.addRegexpMatcher(tpl, false, false, false) return r } // PathPrefix ----------------------------------------------------------------- // PathPrefix adds a matcher for the URL path prefix. This matches if the given // template is a prefix of the full URL path. See Route.Path() for details on // the tpl argument. // // Note that it does not treat slashes specially ("/foobar/" will be matched by // the prefix "/foo") so you may want to use a trailing slash here. // // Also note that the setting of Router.StrictSlash() has no effect on routes // with a PathPrefix matcher. func (r *Route) PathPrefix(tpl string) *Route { r.err = r.addRegexpMatcher(tpl, false, true, false) return r } // Query ---------------------------------------------------------------------- // Queries adds a matcher for URL query values. // It accepts a sequence of key/value pairs. Values may define variables. // For example: // // r := mux.NewRouter() // r.Queries("foo", "bar", "id", "{id:[0-9]+}") // // The above route will only match if the URL contains the defined query // values, e.g.: ?foo=bar&id=42. // // If the value is an empty string, it will match any value if the key is set. // // Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next "&" or "?". // // - {name:pattern} matches the given regexp pattern. func (r *Route) Queries(pairs ...string) *Route { length := len(pairs) if length%2 != 0 { r.err = fmt.Errorf( "mux: number of parameters must be multiple of 2, got %v", pairs) return nil } for i := 0; i < length; i += 2 { if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { return r } } return r } // Schemes -------------------------------------------------------------------- // schemeMatcher matches the request against URL schemes. type schemeMatcher []string func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { return matchInArray(m, r.URL.Scheme) } // Schemes adds a matcher for URL schemes. // It accepts a sequence of schemes to be matched, e.g.: "http", "https". func (r *Route) Schemes(schemes ...string) *Route { for k, v := range schemes { schemes[k] = strings.ToLower(v) } return r.addMatcher(schemeMatcher(schemes)) } // BuildVarsFunc -------------------------------------------------------------- // BuildVarsFunc is the function signature used by custom build variable // functions (which can modify route variables before a route's URL is built).
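// A sketch of the path, prefix, query and scheme matchers defined above;
// the handler names and the ./public directory are hypothetical:
//
//	r := mux.NewRouter()
//	r.Path("/products/{key}").HandlerFunc(productHandler)
//	// A trailing slash on the prefix avoids also matching "/staticfiles".
//	r.PathPrefix("/static/").Handler(http.FileServer(http.Dir("./public")))
//	r.Queries("id", "{id:[0-9]+}").Schemes("https").HandlerFunc(reportHandler)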
type BuildVarsFunc func(map[string]string) map[string]string // BuildVarsFunc adds a custom function to be used to modify build variables // before a route's URL is built. func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { r.buildVarsFunc = f return r } // Subrouter ------------------------------------------------------------------ // Subrouter creates a subrouter for the route. // // It will test the inner routes only if the parent route matched. For example: // // r := mux.NewRouter() // s := r.Host("www.domain.com").Subrouter() // s.HandleFunc("/products/", ProductsHandler) // s.HandleFunc("/products/{key}", ProductHandler) // s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) // // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. func (r *Route) Subrouter() *Router { router := &Router{parent: r, strictSlash: r.strictSlash} r.addMatcher(router) return router } // ---------------------------------------------------------------------------- // URL building // ---------------------------------------------------------------------------- // URL builds a URL for the route. // // It accepts a sequence of key/value pairs for the route variables. For // example, given this route: // // r := mux.NewRouter() // r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). // Name("article") // // ...a URL for it can be built using: // // url, err := r.Get("article").URL("category", "technology", "id", "42") // // ...which will return a url.URL with the following path: // // "/articles/technology/42" // // This also works for host variables: // // r := mux.NewRouter() // r.Host("{subdomain}.domain.com"). // HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). // Name("article") // // // url.String() will be "http://news.domain.com/articles/technology/42" // url, err := r.Get("article").URL("subdomain", "news", // "category", "technology", // "id", "42") // // All variables defined in the route are required, and their values must // conform to the corresponding patterns. func (r *Route) URL(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } if r.regexp == nil { return nil, errors.New("mux: route doesn't have a host or path") } values, err := r.prepareVars(pairs...) if err != nil { return nil, err } var scheme, host, path string if r.regexp.host != nil { // Set a default scheme. scheme = "http" if host, err = r.regexp.host.url(values); err != nil { return nil, err } } if r.regexp.path != nil { if path, err = r.regexp.path.url(values); err != nil { return nil, err } } return &url.URL{ Scheme: scheme, Host: host, Path: path, }, nil } // URLHost builds the host part of the URL for a route. See Route.URL(). // // The route must have a host defined. func (r *Route) URLHost(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } if r.regexp == nil || r.regexp.host == nil { return nil, errors.New("mux: route doesn't have a host") } values, err := r.prepareVars(pairs...) if err != nil { return nil, err } host, err := r.regexp.host.url(values) if err != nil { return nil, err } return &url.URL{ Scheme: "http", Host: host, }, nil } // URLPath builds the path part of the URL for a route. See Route.URL(). // // The route must have a path defined. func (r *Route) URLPath(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } if r.regexp == nil || r.regexp.path == nil { return nil, errors.New("mux: route doesn't have a path") } values, err := r.prepareVars(pairs...)
if err != nil { return nil, err } path, err := r.regexp.path.url(values) if err != nil { return nil, err } return &url.URL{ Path: path, }, nil } // prepareVars converts the route variable pairs into a map. If the route has a // BuildVarsFunc, it is invoked. func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { m, err := mapFromPairs(pairs...) if err != nil { return nil, err } return r.buildVars(m), nil } func (r *Route) buildVars(m map[string]string) map[string]string { if r.parent != nil { m = r.parent.buildVars(m) } if r.buildVarsFunc != nil { m = r.buildVarsFunc(m) } return m } // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- // parentRoute allows routes to know about parent host and path definitions. type parentRoute interface { getNamedRoutes() map[string]*Route getRegexpGroup() *routeRegexpGroup buildVars(map[string]string) map[string]string } // getNamedRoutes returns the map where named routes are registered. func (r *Route) getNamedRoutes() map[string]*Route { if r.parent == nil { // During tests router is not always set. r.parent = NewRouter() } return r.parent.getNamedRoutes() } // getRegexpGroup returns regexp definitions from this route. func (r *Route) getRegexpGroup() *routeRegexpGroup { if r.regexp == nil { if r.parent == nil { // During tests router is not always set. r.parent = NewRouter() } regexp := r.parent.getRegexpGroup() if regexp == nil { r.regexp = new(routeRegexpGroup) } else { // Copy. r.regexp = &routeRegexpGroup{ host: regexp.host, path: regexp.path, queries: regexp.queries, } } } return r.regexp } ================================================ FILE: vendor/github.com/mitchellh/goamz/LICENSE ================================================ This software is licensed under the LGPLv3, included below. As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. 
"The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. 
d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. ================================================ FILE: vendor/github.com/mitchellh/goamz/aws/attempt.go ================================================ package aws import ( "time" ) // AttemptStrategy represents a strategy for waiting for an action // to complete successfully. 
This is an internal type used by the // implementation of other goamz packages. type AttemptStrategy struct { Total time.Duration // total duration of attempt. Delay time.Duration // interval between each try in the burst. Min int // minimum number of retries; overrides Total } type Attempt struct { strategy AttemptStrategy last time.Time end time.Time force bool count int } // Start begins a new sequence of attempts for the given strategy. func (s AttemptStrategy) Start() *Attempt { now := time.Now() return &Attempt{ strategy: s, last: now, end: now.Add(s.Total), force: true, } } // Next waits until it is time to perform the next attempt or returns // false if it is time to stop trying. func (a *Attempt) Next() bool { now := time.Now() sleep := a.nextSleep(now) if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { return false } a.force = false if sleep > 0 && a.count > 0 { time.Sleep(sleep) now = time.Now() } a.count++ a.last = now return true } func (a *Attempt) nextSleep(now time.Time) time.Duration { sleep := a.strategy.Delay - now.Sub(a.last) if sleep < 0 { return 0 } return sleep } // HasNext returns whether another attempt will be made if the current // one fails. If it returns true, the following call to Next is // guaranteed to return true. func (a *Attempt) HasNext() bool { if a.force || a.strategy.Min > a.count { return true } now := time.Now() if now.Add(a.nextSleep(now)).Before(a.end) { a.force = true return true } return false } ================================================ FILE: vendor/github.com/mitchellh/goamz/aws/aws.go ================================================ // // goamz - Go packages to interact with the Amazon Web Services. // // https://wiki.ubuntu.com/goamz // // Copyright (c) 2011 Canonical Ltd. // // Written by Gustavo Niemeyer // package aws import ( "encoding/json" "errors" "fmt" "io/ioutil" "os" "github.com/vaughan0/go-ini" ) // Region defines the URLs where AWS services may be accessed. // // See http://goo.gl/d8BP1 for more details. type Region struct { Name string // the canonical name of this region. EC2Endpoint string S3Endpoint string S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. S3LowercaseBucket bool // true if the region requires bucket names to be lower case. 
SDBEndpoint string SNSEndpoint string SQSEndpoint string IAMEndpoint string ELBEndpoint string AutoScalingEndpoint string RdsEndpoint string Route53Endpoint string } var USGovWest = Region{ "us-gov-west-1", "https://ec2.us-gov-west-1.amazonaws.com", "https://s3-fips-us-gov-west-1.amazonaws.com", "", true, true, "", "https://sns.us-gov-west-1.amazonaws.com", "https://sqs.us-gov-west-1.amazonaws.com", "https://iam.us-gov.amazonaws.com", "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", "https://autoscaling.us-gov-west-1.amazonaws.com", "https://rds.us-gov-west-1.amazonaws.com", "https://route53.amazonaws.com", } var USEast = Region{ "us-east-1", "https://ec2.us-east-1.amazonaws.com", "https://s3.amazonaws.com", "", false, false, "https://sdb.amazonaws.com", "https://sns.us-east-1.amazonaws.com", "https://sqs.us-east-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.us-east-1.amazonaws.com", "https://autoscaling.us-east-1.amazonaws.com", "https://rds.us-east-1.amazonaws.com", "https://route53.amazonaws.com", } var USWest = Region{ "us-west-1", "https://ec2.us-west-1.amazonaws.com", "https://s3-us-west-1.amazonaws.com", "", true, true, "https://sdb.us-west-1.amazonaws.com", "https://sns.us-west-1.amazonaws.com", "https://sqs.us-west-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.us-west-1.amazonaws.com", "https://autoscaling.us-west-1.amazonaws.com", "https://rds.us-west-1.amazonaws.com", "https://route53.amazonaws.com", } var USWest2 = Region{ "us-west-2", "https://ec2.us-west-2.amazonaws.com", "https://s3-us-west-2.amazonaws.com", "", true, true, "https://sdb.us-west-2.amazonaws.com", "https://sns.us-west-2.amazonaws.com", "https://sqs.us-west-2.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.us-west-2.amazonaws.com", "https://autoscaling.us-west-2.amazonaws.com", "https://rds.us-west-2.amazonaws.com", "https://route53.amazonaws.com", } var EUWest = Region{ "eu-west-1", "https://ec2.eu-west-1.amazonaws.com", "https://s3-eu-west-1.amazonaws.com", "", true, true, "https://sdb.eu-west-1.amazonaws.com", "https://sns.eu-west-1.amazonaws.com", "https://sqs.eu-west-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.eu-west-1.amazonaws.com", "https://autoscaling.eu-west-1.amazonaws.com", "https://rds.eu-west-1.amazonaws.com", "https://route53.amazonaws.com", } var EUCentral = Region{ "eu-central-1", "https://ec2.eu-central-1.amazonaws.com", "https://s3-eu-central-1.amazonaws.com", "", true, true, "", "https://sns.eu-central-1.amazonaws.com", "https://sqs.eu-central-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.eu-central-1.amazonaws.com", "https://autoscaling.eu-central-1.amazonaws.com", "https://rds.eu-central-1.amazonaws.com", "https://route53.amazonaws.com", } var APSoutheast = Region{ "ap-southeast-1", "https://ec2.ap-southeast-1.amazonaws.com", "https://s3-ap-southeast-1.amazonaws.com", "", true, true, "https://sdb.ap-southeast-1.amazonaws.com", "https://sns.ap-southeast-1.amazonaws.com", "https://sqs.ap-southeast-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", "https://autoscaling.ap-southeast-1.amazonaws.com", "https://rds.ap-southeast-1.amazonaws.com", "https://route53.amazonaws.com", } var APSoutheast2 = Region{ "ap-southeast-2", "https://ec2.ap-southeast-2.amazonaws.com", "https://s3-ap-southeast-2.amazonaws.com", "", true, true, "https://sdb.ap-southeast-2.amazonaws.com", 
"https://sns.ap-southeast-2.amazonaws.com", "https://sqs.ap-southeast-2.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.ap-southeast-2.amazonaws.com", "https://autoscaling.ap-southeast-2.amazonaws.com", "https://rds.ap-southeast-2.amazonaws.com", "https://route53.amazonaws.com", } var APNortheast = Region{ "ap-northeast-1", "https://ec2.ap-northeast-1.amazonaws.com", "https://s3-ap-northeast-1.amazonaws.com", "", true, true, "https://sdb.ap-northeast-1.amazonaws.com", "https://sns.ap-northeast-1.amazonaws.com", "https://sqs.ap-northeast-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", "https://autoscaling.ap-northeast-1.amazonaws.com", "https://rds.ap-northeast-1.amazonaws.com", "https://route53.amazonaws.com", } var SAEast = Region{ "sa-east-1", "https://ec2.sa-east-1.amazonaws.com", "https://s3-sa-east-1.amazonaws.com", "", true, true, "https://sdb.sa-east-1.amazonaws.com", "https://sns.sa-east-1.amazonaws.com", "https://sqs.sa-east-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.sa-east-1.amazonaws.com", "https://autoscaling.sa-east-1.amazonaws.com", "https://rds.sa-east-1.amazonaws.com", "https://route53.amazonaws.com", } var CNNorth = Region{ "cn-north-1", "https://ec2.cn-north-1.amazonaws.com.cn", "https://s3.cn-north-1.amazonaws.com.cn", "", true, true, "", "https://sns.cn-north-1.amazonaws.com.cn", "https://sqs.cn-north-1.amazonaws.com.cn", "https://iam.cn-north-1.amazonaws.com.cn", "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", "https://autoscaling.cn-north-1.amazonaws.com.cn", "https://rds.cn-north-1.amazonaws.com.cn", "https://route53.amazonaws.com", } var Regions = map[string]Region{ APNortheast.Name: APNortheast, APSoutheast.Name: APSoutheast, APSoutheast2.Name: APSoutheast2, EUWest.Name: EUWest, EUCentral.Name: EUCentral, USEast.Name: USEast, USWest.Name: USWest, USWest2.Name: USWest2, SAEast.Name: SAEast, USGovWest.Name: USGovWest, CNNorth.Name: CNNorth, } type Auth struct { AccessKey, SecretKey, Token string } var unreserved = make([]bool, 128) var hex = "0123456789ABCDEF" func init() { // RFC3986 u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" for _, c := range u { unreserved[c] = true } } type credentials struct { Code string LastUpdated string Type string AccessKeyId string SecretAccessKey string Token string Expiration string } // GetMetaData retrieves instance metadata about the current machine. // // See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details. func GetMetaData(path string) (contents []byte, err error) { url := "http://169.254.169.254/latest/meta-data/" + path resp, err := RetryingClient.Get(url) if err != nil { return } defer resp.Body.Close() if resp.StatusCode != 200 { err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) return } body, err := ioutil.ReadAll(resp.Body) if err != nil { return } return []byte(body), err } func getInstanceCredentials() (cred credentials, err error) { credentialPath := "iam/security-credentials/" // Get the instance role role, err := GetMetaData(credentialPath) if err != nil { return } // Get the instance role credentials credentialJSON, err := GetMetaData(credentialPath + string(role)) if err != nil { return } err = json.Unmarshal([]byte(credentialJSON), &cred) return } // GetAuth creates an Auth based on either passed in credentials, // environment information or instance based role credentials. 
func GetAuth(accessKey string, secretKey string) (auth Auth, err error) { // First try passed in credentials if accessKey != "" && secretKey != "" { return Auth{accessKey, secretKey, ""}, nil } // Next try to get auth from the shared credentials file auth, err = SharedAuth() if err == nil { // Found auth, return return } // Next try to get auth from the environment auth, err = EnvAuth() if err == nil { // Found auth, return return } // Next try getting auth from the instance role cred, err := getInstanceCredentials() if err == nil { // Found auth, return auth.AccessKey = cred.AccessKeyId auth.SecretKey = cred.SecretAccessKey auth.Token = cred.Token return } err = errors.New("No valid AWS authentication found") return } // SharedAuth creates an Auth based on shared credentials stored in // $HOME/.aws/credentials. The AWS_PROFILE environment variable is used to // select the profile. func SharedAuth() (auth Auth, err error) { var profileName = os.Getenv("AWS_PROFILE") if profileName == "" { profileName = "default" } var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE") if credentialsFile == "" { var homeDir = os.Getenv("HOME") if homeDir == "" { err = errors.New("Could not get HOME") return } credentialsFile = homeDir + "/.aws/credentials" } file, err := ini.LoadFile(credentialsFile) if err != nil { err = errors.New("Couldn't parse AWS credentials file") return } var profile = file[profileName] if profile == nil { err = errors.New("Couldn't find profile in AWS credentials file") return } auth.AccessKey = profile["aws_access_key_id"] auth.SecretKey = profile["aws_secret_access_key"] if auth.AccessKey == "" { err = errors.New("AWS_ACCESS_KEY_ID not found in credentials file") } if auth.SecretKey == "" { err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file") } return } // EnvAuth creates an Auth based on environment information. // The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment // variables are used. // For accounts that require a security token, it is read from AWS_SECURITY_TOKEN. func EnvAuth() (auth Auth, err error) { auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") if auth.AccessKey == "" { auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") } auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") if auth.SecretKey == "" { auth.SecretKey = os.Getenv("AWS_SECRET_KEY") } auth.Token = os.Getenv("AWS_SECURITY_TOKEN") if auth.AccessKey == "" { err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") } if auth.SecretKey == "" { err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") } return } // Encode takes a string and URI-encodes it in a way suitable // to be used in AWS signatures. func Encode(s string) string { encode := false for i := 0; i != len(s); i++ { c := s[i] if c > 127 || !unreserved[c] { encode = true break } } if !encode { return s } e := make([]byte, len(s)*3) ei := 0 for i := 0; i != len(s); i++ { c := s[i] if c > 127 || !unreserved[c] { e[ei] = '%' e[ei+1] = hex[c>>4] e[ei+2] = hex[c&0xF] ei += 3 } else { e[ei] = c ei += 1 } } return string(e[:ei]) } ================================================ FILE: vendor/github.com/mitchellh/goamz/aws/client.go ================================================ package aws import ( "math" "net" "net/http" "time" ) type RetryableFunc func(*http.Request, *http.Response, error) bool type WaitFunc func(try int) type DeadlineFunc func() time.Time type ResilientTransport struct { // Timeout is the maximum amount of time a dial will wait for // a connect to complete.
// // The default is no timeout. // // With or without a timeout, the operating system may impose // its own earlier timeout. For instance, TCP timeouts are // often around 3 minutes. DialTimeout time.Duration // MaxTries, if non-zero, specifies the number of times we will retry on // failure. Retries are only attempted for temporary network errors or known // safe failures. MaxTries int Deadline DeadlineFunc ShouldRetry RetryableFunc Wait WaitFunc transport *http.Transport } // Convenience method for creating an http client func NewClient(rt *ResilientTransport) *http.Client { rt.transport = &http.Transport{ Dial: func(netw, addr string) (net.Conn, error) { c, err := net.DialTimeout(netw, addr, rt.DialTimeout) if err != nil { return nil, err } c.SetDeadline(rt.Deadline()) return c, nil }, DisableKeepAlives: true, Proxy: http.ProxyFromEnvironment, } // TODO: Would be nice if ResilientTransport allowed clients to initialize // with http.Transport attributes. return &http.Client{ Transport: rt, } } var retryingTransport = &ResilientTransport{ Deadline: func() time.Time { return time.Now().Add(5 * time.Second) }, DialTimeout: 10 * time.Second, MaxTries: 3, ShouldRetry: awsRetry, Wait: ExpBackoff, } // Exported default client var RetryingClient = NewClient(retryingTransport) func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { return t.tries(req) } // Retry a request a maximum of t.MaxTries times. // We'll only retry if the proper criteria are met. // If a wait function is specified, wait that amount of time // in between requests. func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { for try := 0; try < t.MaxTries; try += 1 { res, err = t.transport.RoundTrip(req) if !t.ShouldRetry(req, res, err) { break } if res != nil { res.Body.Close() } if t.Wait != nil { t.Wait(try) } } return } func ExpBackoff(try int) { time.Sleep(100 * time.Millisecond * time.Duration(math.Exp2(float64(try)))) } func LinearBackoff(try int) { time.Sleep(time.Duration(try*100) * time.Millisecond) } // Decide if we should retry a request. // In general, the criteria for retrying a request is described here // http://docs.aws.amazon.com/general/latest/gr/api-retries.html func awsRetry(req *http.Request, res *http.Response, err error) bool { retry := false // Retry if there's a temporary network error. if neterr, ok := err.(net.Error); ok { if neterr.Temporary() { retry = true } } // Retry if we get a 5xx series error. if res != nil { if res.StatusCode >= 500 && res.StatusCode < 600 { retry = true } } return retry } ================================================ FILE: vendor/github.com/mitchellh/goamz/s3/multi.go ================================================ package s3 import ( "bytes" "crypto/md5" "encoding/base64" "encoding/hex" "encoding/xml" "errors" "io" "sort" "strconv" ) // Multi represents an unfinished multipart upload. // // Multipart uploads allow sending big objects in smaller chunks. // After all parts have been sent, the upload must be explicitly // completed by calling Complete with the list of parts. // // See http://goo.gl/vJfTG for an overview of multipart uploads. type Multi struct { Bucket *Bucket Key string UploadId string } // That's the default. Here just for testing. var listMultiMax = 1000 type listMultiResp struct { NextKeyMarker string NextUploadIdMarker string IsTruncated bool Upload []Multi CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` } // ListMulti returns the list of unfinished multipart uploads in b.
// // The prefix parameter limits the response to keys that begin with the // specified prefix. You can use prefixes to separate a bucket into different // groupings of keys (to get the feeling of folders, for example). // // The delim parameter causes the response to group all of the keys that // share a common prefix up to the next delimiter in a single entry within // the CommonPrefixes field. You can use delimiters to separate a bucket // into different groupings of keys, similar to how folders would work. // // See http://goo.gl/ePioY for details. func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { params := map[string][]string{ "uploads": {""}, "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, "prefix": {prefix}, "delimiter": {delim}, } for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "GET", bucket: b.Name, params: params, } var resp listMultiResp err := b.S3.query(req, &resp) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, nil, err } for i := range resp.Upload { multi := &resp.Upload[i] multi.Bucket = b multis = append(multis, multi) } prefixes = append(prefixes, resp.CommonPrefixes...) if !resp.IsTruncated { return multis, prefixes, nil } params["key-marker"] = []string{resp.NextKeyMarker} params["upload-id-marker"] = []string{resp.NextUploadIdMarker} attempt = attempts.Start() // Last request worked. } panic("unreachable") } // Multi returns a multipart upload handler for the provided key // inside b. If a multipart upload exists for key, it is returned, // otherwise a new multipart upload is initiated with contType and perm. func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) { multis, _, err := b.ListMulti(key, "") if err != nil && !hasCode(err, "NoSuchUpload") { return nil, err } for _, m := range multis { if m.Key == key { return m, nil } } return b.InitMulti(key, contType, perm) } // InitMulti initializes a new multipart upload at the provided // key inside b and returns a value for manipulating it. // // See http://goo.gl/XP8kL for details. func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) { headers := map[string][]string{ "Content-Type": {contType}, "Content-Length": {"0"}, "x-amz-acl": {string(perm)}, } params := map[string][]string{ "uploads": {""}, } req := &request{ method: "POST", bucket: b.Name, path: key, headers: headers, params: params, } var err error var resp struct { UploadId string `xml:"UploadId"` } for attempt := attempts.Start(); attempt.Next(); { err = b.S3.query(req, &resp) if !shouldRetry(err) { break } } if err != nil { return nil, err } return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil } // PutPart sends part n of the multipart upload, reading all the content from r. // Each part, except for the last one, must be at least 5MB in size. // // See http://goo.gl/pqZer for details. 
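// A sketch of the full multipart flow built from InitMulti, PutPart and
// Complete (Complete and Abort are defined further below); the key, content
// type and readers slice are hypothetical, and the "io" import is assumed:
//
//	func uploadParts(b *s3.Bucket, readers []io.ReadSeeker) error {
//		m, err := b.InitMulti("backups/archive.tar", "application/octet-stream", s3.Private)
//		if err != nil {
//			return err
//		}
//		var parts []s3.Part
//		for i, r := range readers { // every part except the last must be at least 5MB
//			p, err := m.PutPart(i+1, r) // part numbers start at 1
//			if err != nil {
//				return err
//			}
//			parts = append(parts, p)
//		}
//		return m.Complete(parts) // or m.Abort() to discard the upload
//	}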
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { partSize, _, md5b64, err := seekerInfo(r) if err != nil { return Part{}, err } return m.putPart(n, r, partSize, md5b64) } func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { headers := map[string][]string{ "Content-Length": {strconv.FormatInt(partSize, 10)}, "Content-MD5": {md5b64}, } params := map[string][]string{ "uploadId": {m.UploadId}, "partNumber": {strconv.FormatInt(int64(n), 10)}, } for attempt := attempts.Start(); attempt.Next(); { _, err := r.Seek(0, 0) if err != nil { return Part{}, err } req := &request{ method: "PUT", bucket: m.Bucket.Name, path: m.Key, headers: headers, params: params, payload: r, } err = m.Bucket.S3.prepare(req) if err != nil { return Part{}, err } resp, err := m.Bucket.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return Part{}, err } etag := resp.Header.Get("ETag") if etag == "" { return Part{}, errors.New("part upload succeeded with no ETag") } return Part{n, etag, partSize}, nil } panic("unreachable") } func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { _, err = r.Seek(0, 0) if err != nil { return 0, "", "", err } digest := md5.New() size, err = io.Copy(digest, r) if err != nil { return 0, "", "", err } sum := digest.Sum(nil) md5hex = hex.EncodeToString(sum) md5b64 = base64.StdEncoding.EncodeToString(sum) return size, md5hex, md5b64, nil } type Part struct { N int `xml:"PartNumber"` ETag string Size int64 } type partSlice []Part func (s partSlice) Len() int { return len(s) } func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } type listPartsResp struct { NextPartNumberMarker string IsTruncated bool Part []Part } // That's the default. Here just for testing. var listPartsMax = 1000 // ListParts returns the list of previously uploaded parts in m, // ordered by part number. // // See http://goo.gl/ePioY for details. func (m *Multi) ListParts() ([]Part, error) { params := map[string][]string{ "uploadId": {m.UploadId}, "max-parts": {strconv.FormatInt(int64(listPartsMax), 10)}, } var parts partSlice for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "GET", bucket: m.Bucket.Name, path: m.Key, params: params, } var resp listPartsResp err := m.Bucket.S3.query(req, &resp) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, err } parts = append(parts, resp.Part...) if !resp.IsTruncated { sort.Sort(parts) return parts, nil } params["part-number-marker"] = []string{resp.NextPartNumberMarker} attempt = attempts.Start() // Last request worked. } panic("unreachable") } type ReaderAtSeeker interface { io.ReaderAt io.ReadSeeker } // PutAll sends all of r via a multipart upload with parts no larger // than partSize bytes, which must be set to at least 5MB. // Parts previously uploaded are either reused if their checksum // and size match the new part, or otherwise overwritten with the // new content. // PutAll returns all the parts of m (reused or not). func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { old, err := m.ListParts() if err != nil && !hasCode(err, "NoSuchUpload") { return nil, err } reuse := 0 // Index of next old part to consider reusing. current := 1 // Part number of latest good part handled. 
totalSize, err := r.Seek(0, 2) if err != nil { return nil, err } first := true // Must send at least one empty part if the file is empty. var result []Part NextSection: for offset := int64(0); offset < totalSize || first; offset += partSize { first = false if offset+partSize > totalSize { partSize = totalSize - offset } section := io.NewSectionReader(r, offset, partSize) _, md5hex, md5b64, err := seekerInfo(section) if err != nil { return nil, err } for reuse < len(old) && old[reuse].N <= current { // Looks like this part was already sent. part := &old[reuse] etag := `"` + md5hex + `"` if part.N == current && part.Size == partSize && part.ETag == etag { // Checksum matches. Reuse the old part. result = append(result, *part) current++ continue NextSection } reuse++ } // Part wasn't found or doesn't match. Send it. part, err := m.putPart(current, section, partSize, md5b64) if err != nil { return nil, err } result = append(result, part) current++ } return result, nil } type completeUpload struct { XMLName xml.Name `xml:"CompleteMultipartUpload"` Parts completeParts `xml:"Part"` } type completePart struct { PartNumber int ETag string } type completeParts []completePart func (p completeParts) Len() int { return len(p) } func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // Complete assembles the given previously uploaded parts into the // final object. This operation may take several minutes. // // See http://goo.gl/2Z7Tw for details. func (m *Multi) Complete(parts []Part) error { params := map[string][]string{ "uploadId": {m.UploadId}, } c := completeUpload{} for _, p := range parts { c.Parts = append(c.Parts, completePart{p.N, p.ETag}) } sort.Sort(c.Parts) data, err := xml.Marshal(&c) if err != nil { return err } for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "POST", bucket: m.Bucket.Name, path: m.Key, params: params, payload: bytes.NewReader(data), } err := m.Bucket.S3.query(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } return err } panic("unreachable") } // Abort deletes an unfinished multipart upload and any previously // uploaded parts for it. // // After a multipart upload is aborted, no additional parts can be // uploaded using it. However, if any part uploads are currently in // progress, those part uploads might or might not succeed. As a result, // it might be necessary to abort a given multipart upload multiple // times in order to completely free all storage consumed by all parts. // // NOTE: If the described scenario happens to you, please report back to // the goamz authors with details. In the future such retrying should be // handled internally, but it's not clear what happens precisely (Is an // error returned? Is the issue completely undetectable?). // // See http://goo.gl/dnyJw for details. func (m *Multi) Abort() error { params := map[string][]string{ "uploadId": {m.UploadId}, } for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "DELETE", bucket: m.Bucket.Name, path: m.Key, params: params, } err := m.Bucket.S3.query(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } return err } panic("unreachable") } ================================================ FILE: vendor/github.com/mitchellh/goamz/s3/s3.go ================================================ // // goamz - Go packages to interact with the Amazon Web Services. // // https://wiki.ubuntu.com/goamz // // Copyright (c) 2011 Canonical Ltd.
// // Written by Gustavo Niemeyer // package s3 import ( "bytes" "crypto/md5" "encoding/base64" "encoding/xml" "fmt" "github.com/mitchellh/goamz/aws" "io" "io/ioutil" "log" "net" "net/http" "net/http/httputil" "net/url" "strconv" "strings" "time" ) const debug = false // The S3 type encapsulates operations with an S3 region. type S3 struct { aws.Auth aws.Region HTTPClient func() *http.Client private byte // Reserve the right of using private data. } // The Bucket type encapsulates operations with an S3 bucket. type Bucket struct { *S3 Name string } // The Owner type represents the owner of the object in an S3 bucket. type Owner struct { ID string DisplayName string } var attempts = aws.AttemptStrategy{ Min: 5, Total: 5 * time.Second, Delay: 200 * time.Millisecond, } // New creates a new S3. func New(auth aws.Auth, region aws.Region) *S3 { return &S3{ Auth: auth, Region: region, HTTPClient: func() *http.Client { return http.DefaultClient }, private: 0} } // Bucket returns a Bucket with the given name. func (s3 *S3) Bucket(name string) *Bucket { if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket { name = strings.ToLower(name) } return &Bucket{s3, name} } var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LocationConstraint>%s</LocationConstraint></CreateBucketConfiguration>` // locationConstraint returns an io.Reader specifying a LocationConstraint if // required for the region. // // See http://goo.gl/bh9Kq for details. func (s3 *S3) locationConstraint() io.Reader { constraint := "" if s3.Region.S3LocationConstraint { constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name) } return strings.NewReader(constraint) } type ACL string const ( Private = ACL("private") PublicRead = ACL("public-read") PublicReadWrite = ACL("public-read-write") AuthenticatedRead = ACL("authenticated-read") BucketOwnerRead = ACL("bucket-owner-read") BucketOwnerFull = ACL("bucket-owner-full-control") ) // The ListBucketsResp type holds the results of a List buckets operation. type ListBucketsResp struct { Buckets []Bucket `xml:">Bucket"` } // ListBuckets lists all buckets. // // See: http://goo.gl/NqlyMN func (s3 *S3) ListBuckets() (result *ListBucketsResp, err error) { req := &request{ path: "/", } result = &ListBucketsResp{} for attempt := attempts.Start(); attempt.Next(); { err = s3.query(req, result) if !shouldRetry(err) { break } } if err != nil { return nil, err } // set S3 instance on buckets for i := range result.Buckets { result.Buckets[i].S3 = s3 } return result, nil } // PutBucket creates a new bucket. // // See http://goo.gl/ndjnR for details. func (b *Bucket) PutBucket(perm ACL) error { headers := map[string][]string{ "x-amz-acl": {string(perm)}, } req := &request{ method: "PUT", bucket: b.Name, path: "/", headers: headers, payload: b.locationConstraint(), } return b.S3.query(req, nil) } // DelBucket removes an existing S3 bucket. All objects in the bucket must // be removed before the bucket itself can be removed. // // See http://goo.gl/GoBrY for details. func (b *Bucket) DelBucket() (err error) { req := &request{ method: "DELETE", bucket: b.Name, path: "/", } for attempt := attempts.Start(); attempt.Next(); { err = b.S3.query(req, nil) if !shouldRetry(err) { break } } return err } // Get retrieves an object from an S3 bucket. // // See http://goo.gl/isCO7 for details. func (b *Bucket) Get(path string) (data []byte, err error) { body, err := b.GetReader(path) if err != nil { return nil, err } data, err = ioutil.ReadAll(body) body.Close() return data, err } // GetReader retrieves an object from an S3 bucket.
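// A short sketch of creating a client and doing a simple Put/Get against a
// bucket; the bucket name, key, data variable and log-based error handling
// are illustrative only (auth would come from aws.GetAuth):
//
//	b := s3.New(auth, aws.USEast).Bucket("example-uploads")
//	if err := b.Put("images/cat.jpg", data, "image/jpeg", s3.PublicRead); err != nil {
//		log.Fatal(err)
//	}
//	got, err := b.Get("images/cat.jpg") // got now holds the object bytes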
// It is the caller's responsibility to call Close on rc when // finished reading. func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { resp, err := b.GetResponse(path) if resp != nil { return resp.Body, err } return nil, err } // GetResponse retrieves an object from an S3 bucket returning the http response // It is the caller's responsibility to call Close on rc when // finished reading. func (b *Bucket) GetResponse(path string) (*http.Response, error) { return b.getResponseParams(path, nil) } // GetTorrent retrieves an Torrent object from an S3 bucket an io.ReadCloser. // It is the caller's responsibility to call Close on rc when finished reading. func (b *Bucket) GetTorrentReader(path string) (io.ReadCloser, error) { resp, err := b.getResponseParams(path, url.Values{"torrent": {""}}) if err != nil { return nil, err } return resp.Body, nil } // GetTorrent retrieves an Torrent object from an S3, returning // the torrent as a []byte. func (b *Bucket) GetTorrent(path string) ([]byte, error) { body, err := b.GetTorrentReader(path) if err != nil { return nil, err } defer body.Close() return ioutil.ReadAll(body) } func (b *Bucket) getResponseParams(path string, params url.Values) (*http.Response, error) { req := &request{ bucket: b.Name, path: path, params: params, } err := b.S3.prepare(req) if err != nil { return nil, err } for attempt := attempts.Start(); attempt.Next(); { resp, err := b.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, err } return resp, nil } panic("unreachable") } func (b *Bucket) Head(path string) (*http.Response, error) { req := &request{ method: "HEAD", bucket: b.Name, path: path, } err := b.S3.prepare(req) if err != nil { return nil, err } for attempt := attempts.Start(); attempt.Next(); { resp, err := b.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, err } return resp, nil } panic("unreachable") } // Put inserts an object into the S3 bucket. // // See http://goo.gl/FEBPD for details. func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error { body := bytes.NewBuffer(data) return b.PutReader(path, body, int64(len(data)), contType, perm) } /* PutHeader - like Put, inserts an object into the S3 bucket. Instead of Content-Type string, pass in custom headers to override defaults. */ func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error { body := bytes.NewBuffer(data) return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm) } // PutReader inserts an object into the S3 bucket by consuming data // from r until EOF. func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error { headers := map[string][]string{ "Content-Length": {strconv.FormatInt(length, 10)}, "Content-Type": {contType}, "x-amz-acl": {string(perm)}, } req := &request{ method: "PUT", bucket: b.Name, path: path, headers: headers, payload: r, } return b.S3.query(req, nil) } /* PutReaderHeader - like PutReader, inserts an object into S3 from a reader. Instead of Content-Type string, pass in custom headers to override defaults. 
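A hypothetical sketch, written from a caller's point of view; the key, reader,
size, and header values are assumptions for illustration only:

	headers := map[string][]string{
		"Content-Type":  {"image/png"},
		"Cache-Control": {"max-age=86400"},
	}
	err := bucket.PutReaderHeader("img/logo.png", file, size, headers, s3.PublicRead)
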
*/ func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error { // Default headers headers := map[string][]string{ "Content-Length": {strconv.FormatInt(length, 10)}, "Content-Type": {"application/text"}, "x-amz-acl": {string(perm)}, } // Override with custom headers for key, value := range customHeaders { headers[key] = value } req := &request{ method: "PUT", bucket: b.Name, path: path, headers: headers, payload: r, } return b.S3.query(req, nil) } /* Copy - copy objects inside bucket */ func (b *Bucket) Copy(oldPath, newPath string, perm ACL) error { if !strings.HasPrefix(oldPath, "/") { oldPath = "/" + oldPath } req := &request{ method: "PUT", bucket: b.Name, path: newPath, headers: map[string][]string{ "x-amz-copy-source": {amazonEscape("/" + b.Name + oldPath)}, "x-amz-acl": {string(perm)}, }, } err := b.S3.prepare(req) if err != nil { return err } for attempt := attempts.Start(); attempt.Next(); { _, err = b.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return err } return nil } panic("unreachable") } // Del removes an object from the S3 bucket. // // See http://goo.gl/APeTt for details. func (b *Bucket) Del(path string) error { req := &request{ method: "DELETE", bucket: b.Name, path: path, } return b.S3.query(req, nil) } type Object struct { Key string } type MultiObjectDeleteBody struct { XMLName xml.Name `xml:"Delete"` Quiet bool Object []Object } func base64md5(data []byte) string { h := md5.New() h.Write(data) return base64.StdEncoding.EncodeToString(h.Sum(nil)) } // MultiDel removes multiple objects from the S3 bucket efficiently. // A maximum of 1000 keys at once may be specified. // // See http://goo.gl/WvA5sj for details. func (b *Bucket) MultiDel(paths []string) error { // create XML payload v := MultiObjectDeleteBody{} v.Object = make([]Object, len(paths)) for i, path := range paths { v.Object[i] = Object{path} } data, _ := xml.Marshal(v) // Content-MD5 is required md5hash := base64md5(data) req := &request{ method: "POST", bucket: b.Name, path: "/", params: url.Values{"delete": {""}}, headers: http.Header{"Content-MD5": {md5hash}}, payload: bytes.NewReader(data), } return b.S3.query(req, nil) } // The ListResp type holds the results of a List bucket operation. type ListResp struct { Name string Prefix string Delimiter string Marker string NextMarker string MaxKeys int // IsTruncated is true if the results have been truncated because // there are more keys and prefixes than can fit in MaxKeys. // N.B. this is the opposite sense to that documented (incorrectly) in // http://goo.gl/YjQTc IsTruncated bool Contents []Key CommonPrefixes []string `xml:">Prefix"` } // The Key type represents an item stored in an S3 bucket. type Key struct { Key string LastModified string Size int64 // ETag gives the hex-encoded MD5 sum of the contents, // surrounded with double-quotes. ETag string StorageClass string Owner Owner } // List returns information about objects in an S3 bucket. // // The prefix parameter limits the response to keys that begin with the // specified prefix. // // The delim parameter causes the response to group all of the keys that // share a common prefix up to the next delimiter in a single entry within // the CommonPrefixes field. You can use delimiters to separate a bucket // into different groupings of keys, similar to how folders would work. // // The marker parameter specifies the key to start with when listing objects // in a bucket. 
Amazon S3 lists objects in alphabetical order and // will return keys alphabetically greater than the marker. // // The max parameter specifies how many keys + common prefixes to return in // the response. The default is 1000. // // For example, given these keys in a bucket: // // index.html // index2.html // photos/2006/January/sample.jpg // photos/2006/February/sample2.jpg // photos/2006/February/sample3.jpg // photos/2006/February/sample4.jpg // // Listing this bucket with delimiter set to "/" would yield the // following result: // // &ListResp{ // Name: "sample-bucket", // MaxKeys: 1000, // Delimiter: "/", // Contents: []Key{ // {Key: "index.html", "index2.html"}, // }, // CommonPrefixes: []string{ // "photos/", // }, // } // // Listing the same bucket with delimiter set to "/" and prefix set to // "photos/2006/" would yield the following result: // // &ListResp{ // Name: "sample-bucket", // MaxKeys: 1000, // Delimiter: "/", // Prefix: "photos/2006/", // CommonPrefixes: []string{ // "photos/2006/February/", // "photos/2006/January/", // }, // } // // See http://goo.gl/YjQTc for details. func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { params := map[string][]string{ "prefix": {prefix}, "delimiter": {delim}, "marker": {marker}, } if max != 0 { params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} } req := &request{ bucket: b.Name, params: params, } result = &ListResp{} for attempt := attempts.Start(); attempt.Next(); { err = b.S3.query(req, result) if !shouldRetry(err) { break } } if err != nil { return nil, err } return result, nil } // Returns a mapping of all key names in this bucket to Key objects func (b *Bucket) GetBucketContents() (*map[string]Key, error) { bucket_contents := map[string]Key{} prefix := "" path_separator := "" marker := "" for { contents, err := b.List(prefix, path_separator, marker, 1000) if err != nil { return &bucket_contents, err } last_key := "" for _, key := range contents.Contents { bucket_contents[key.Key] = key last_key = key.Key } if contents.IsTruncated { marker = contents.NextMarker if marker == "" { // From the s3 docs: If response does not include the // NextMarker and it is truncated, you can use the value of the // last Key in the response as the marker in the subsequent // request to get the next set of object keys. marker = last_key } } else { break } } return &bucket_contents, nil } // Get metadata from the key without returning the key content func (b *Bucket) GetKey(path string) (*Key, error) { req := &request{ bucket: b.Name, path: path, method: "HEAD", } err := b.S3.prepare(req) if err != nil { return nil, err } key := &Key{} for attempt := attempts.Start(); attempt.Next(); { resp, err := b.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, err } key.Key = path key.LastModified = resp.Header.Get("Last-Modified") key.ETag = resp.Header.Get("ETag") contentLength := resp.Header.Get("Content-Length") size, err := strconv.ParseInt(contentLength, 10, 64) if err != nil { return key, fmt.Errorf("bad s3 content-length %v: %v", contentLength, err) } key.Size = size return key, nil } panic("unreachable") } // URL returns a non-signed URL that allows retriving the // object at path. It only works if the object is publicly // readable (see SignedURL). 
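// A hypothetical sketch (the bucket, keys, and expiry duration are assumed
// for illustration):
//
//	public := bucket.URL("public/logo.png")
//	signed := bucket.SignedURL("private/report.pdf", time.Now().Add(15*time.Minute))
//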
func (b *Bucket) URL(path string) string { req := &request{ bucket: b.Name, path: path, } err := b.S3.prepare(req) if err != nil { panic(err) } u, err := req.url(true) if err != nil { panic(err) } u.RawQuery = "" return u.String() } // SignedURL returns a signed URL that allows anyone holding the URL // to retrieve the object at path. The signature is valid until expires. func (b *Bucket) SignedURL(path string, expires time.Time) string { req := &request{ bucket: b.Name, path: path, params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}}, } err := b.S3.prepare(req) if err != nil { panic(err) } u, err := req.url(true) if err != nil { panic(err) } return u.String() } type request struct { method string bucket string path string signpath string params url.Values headers http.Header baseurl string payload io.Reader prepared bool } // amazonShouldEscape returns true if byte should be escaped func amazonShouldEscape(c byte) bool { return !((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c == '~' || c == '.' || c == '/' || c == ':') } // amazonEscape does uri escaping exactly as Amazon does func amazonEscape(s string) string { hexCount := 0 for i := 0; i < len(s); i++ { if amazonShouldEscape(s[i]) { hexCount++ } } if hexCount == 0 { return s } t := make([]byte, len(s)+2*hexCount) j := 0 for i := 0; i < len(s); i++ { if c := s[i]; amazonShouldEscape(c) { t[j] = '%' t[j+1] = "0123456789ABCDEF"[c>>4] t[j+2] = "0123456789ABCDEF"[c&15] j += 3 } else { t[j] = s[i] j++ } } return string(t) } // url returns url to resource, either full (with host/scheme) or // partial for HTTP request func (req *request) url(full bool) (*url.URL, error) { u, err := url.Parse(req.baseurl) if err != nil { return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) } u.Opaque = amazonEscape(req.path) if full { u.Opaque = "//" + u.Host + u.Opaque } u.RawQuery = req.params.Encode() return u, nil } // query prepares and runs the req request. // If resp is not nil, the XML data contained in the response // body will be unmarshalled on it. func (s3 *S3) query(req *request, resp interface{}) error { err := s3.prepare(req) if err == nil { var httpResponse *http.Response httpResponse, err = s3.run(req, resp) if resp == nil && httpResponse != nil { httpResponse.Body.Close() } } return err } // prepare sets up req to be delivered to S3. func (s3 *S3) prepare(req *request) error { if !req.prepared { req.prepared = true if req.method == "" { req.method = "GET" } // Copy so they can be mutated without affecting on retries. params := make(url.Values) headers := make(http.Header) for k, v := range req.params { params[k] = v } for k, v := range req.headers { headers[k] = v } req.params = params req.headers = headers if !strings.HasPrefix(req.path, "/") { req.path = "/" + req.path } req.signpath = req.path if req.bucket != "" { req.baseurl = s3.Region.S3BucketEndpoint if req.baseurl == "" { // Use the path method to address the bucket. req.baseurl = s3.Region.S3Endpoint req.path = "/" + req.bucket + req.path } else { // Just in case, prevent injection. if strings.IndexAny(req.bucket, "/:@") >= 0 { return fmt.Errorf("bad S3 bucket: %q", req.bucket) } req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1) } req.signpath = "/" + req.bucket + req.signpath } else { req.baseurl = s3.Region.S3Endpoint } } // Always sign again as it's not clear how far the // server has handled a previous attempt. 
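// Parse the endpoint URL so its host can be placed in the signed Host header below.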
u, err := url.Parse(req.baseurl) if err != nil { return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) } req.headers["Host"] = []string{u.Host} req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)} sign(s3.Auth, req.method, amazonEscape(req.signpath), req.params, req.headers) return nil } // run sends req and returns the http response from the server. // If resp is not nil, the XML data contained in the response // body will be unmarshalled on it. func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) { if debug { log.Printf("Running S3 request: %#v", req) } u, err := req.url(false) if err != nil { return nil, err } hreq := http.Request{ URL: u, Method: req.method, ProtoMajor: 1, ProtoMinor: 1, Close: true, Header: req.headers, } if v, ok := req.headers["Content-Length"]; ok { hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64) delete(req.headers, "Content-Length") } if req.payload != nil { hreq.Body = ioutil.NopCloser(req.payload) } hresp, err := s3.HTTPClient().Do(&hreq) if err != nil { return nil, err } if debug { dump, _ := httputil.DumpResponse(hresp, true) log.Printf("} -> %s\n", dump) } if hresp.StatusCode != 200 && hresp.StatusCode != 204 { defer hresp.Body.Close() return nil, buildError(hresp) } if resp != nil { err = xml.NewDecoder(hresp.Body).Decode(resp) hresp.Body.Close() } return hresp, err } // Error represents an error in an operation with S3. type Error struct { StatusCode int // HTTP status code (200, 403, ...) Code string // EC2 error code ("UnsupportedOperation", ...) Message string // The human-oriented error message BucketName string RequestId string HostId string } func (e *Error) Error() string { return e.Message } func buildError(r *http.Response) error { if debug { log.Printf("got error (status code %v)", r.StatusCode) data, err := ioutil.ReadAll(r.Body) if err != nil { log.Printf("\tread error: %v", err) } else { log.Printf("\tdata:\n%s\n\n", data) } r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) } err := Error{} // TODO return error if Unmarshal fails? xml.NewDecoder(r.Body).Decode(&err) r.Body.Close() err.StatusCode = r.StatusCode if err.Message == "" { err.Message = r.Status } if debug { log.Printf("err: %#v\n", err) } return &err } func shouldRetry(err error) bool { if err == nil { return false } switch err { case io.ErrUnexpectedEOF, io.EOF: return true } switch e := err.(type) { case *net.DNSError: return true case *net.OpError: switch e.Op { case "read", "write": return true } case *Error: switch e.Code { case "InternalError", "NoSuchUpload", "NoSuchBucket": return true } } return false } func hasCode(err error, code string) bool { s3err, ok := err.(*Error) return ok && s3err.Code == code } ================================================ FILE: vendor/github.com/mitchellh/goamz/s3/s3test/server.go ================================================ package s3test import ( "bytes" "crypto/md5" "encoding/hex" "encoding/xml" "fmt" "github.com/mitchellh/goamz/s3" "io" "io/ioutil" "log" "net" "net/http" "net/url" "regexp" "sort" "strconv" "strings" "sync" "time" ) const debug = false type s3Error struct { statusCode int XMLName struct{} `xml:"Error"` Code string Message string BucketName string RequestId string HostId string } type action struct { srv *Server w http.ResponseWriter req *http.Request reqId string } // Config controls the internal behaviour of the Server. A nil config is the default // and behaves as if all configurations assume their default behaviour. 
Once passed // to NewServer, the configuration must not be modified. type Config struct { // Send409Conflict controls how the Server will respond to calls to PUT on a // previously existing bucket. The default is false, and corresponds to the // us-east-1 s3 enpoint. Setting this value to true emulates the behaviour of // all other regions. // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html Send409Conflict bool } func (c *Config) send409Conflict() bool { if c != nil { return c.Send409Conflict } return false } // Server is a fake S3 server for testing purposes. // All of the data for the server is kept in memory. type Server struct { url string reqId int listener net.Listener mu sync.Mutex buckets map[string]*bucket config *Config } type bucket struct { name string acl s3.ACL ctime time.Time objects map[string]*object } type object struct { name string mtime time.Time meta http.Header // metadata to return with requests. checksum []byte // also held as Content-MD5 in meta. data []byte } // A resource encapsulates the subject of an HTTP request. // The resource referred to may or may not exist // when the request is made. type resource interface { put(a *action) interface{} get(a *action) interface{} post(a *action) interface{} delete(a *action) interface{} } func NewServer(config *Config) (*Server, error) { l, err := net.Listen("tcp", "localhost:0") if err != nil { return nil, fmt.Errorf("cannot listen on localhost: %v", err) } srv := &Server{ listener: l, url: "http://" + l.Addr().String(), buckets: make(map[string]*bucket), config: config, } go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { srv.serveHTTP(w, req) })) return srv, nil } // Quit closes down the server. func (srv *Server) Quit() { srv.listener.Close() } // URL returns a URL for the server. func (srv *Server) URL() string { return srv.url } func fatalf(code int, codeStr string, errf string, a ...interface{}) { panic(&s3Error{ statusCode: code, Code: codeStr, Message: fmt.Sprintf(errf, a...), }) } // serveHTTP serves the S3 protocol. func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { // ignore error from ParseForm as it's usually spurious. req.ParseForm() srv.mu.Lock() defer srv.mu.Unlock() if debug { log.Printf("s3test %q %q", req.Method, req.URL) } a := &action{ srv: srv, w: w, req: req, reqId: fmt.Sprintf("%09X", srv.reqId), } srv.reqId++ var r resource defer func() { switch err := recover().(type) { case *s3Error: switch r := r.(type) { case objectResource: err.BucketName = r.bucket.name case bucketResource: err.BucketName = r.name } err.RequestId = a.reqId // TODO HostId w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`) w.WriteHeader(err.statusCode) xmlMarshal(w, err) case nil: default: panic(err) } }() r = srv.resourceForURL(req.URL) var resp interface{} switch req.Method { case "PUT": resp = r.put(a) case "GET", "HEAD": resp = r.get(a) case "DELETE": resp = r.delete(a) case "POST": resp = r.post(a) default: fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) } if resp != nil && req.Method != "HEAD" { xmlMarshal(w, resp) } } // xmlMarshal is the same as xml.Marshal except that // it panics on error. The marshalling should not fail, // but we want to know if it does. 
func xmlMarshal(w io.Writer, x interface{}) { if err := xml.NewEncoder(w).Encode(x); err != nil { panic(fmt.Errorf("error marshalling %#v: %v", x, err)) } } // In a fully implemented test server, each of these would have // its own resource type. var unimplementedBucketResourceNames = map[string]bool{ "acl": true, "lifecycle": true, "policy": true, "location": true, "logging": true, "notification": true, "versions": true, "requestPayment": true, "versioning": true, "website": true, "uploads": true, } var unimplementedObjectResourceNames = map[string]bool{ "uploadId": true, "acl": true, "torrent": true, "uploads": true, } var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?") // resourceForURL returns a resource object for the given URL. func (srv *Server) resourceForURL(u *url.URL) (r resource) { if u.Path == "/" { return serviceResource{ buckets: srv.buckets, } } m := pathRegexp.FindStringSubmatch(u.Path) if m == nil { fatalf(404, "InvalidURI", "Couldn't parse the specified URI") } bucketName := m[2] objectName := m[4] if bucketName == "" { return nullResource{} // root } b := bucketResource{ name: bucketName, bucket: srv.buckets[bucketName], } q := u.Query() if objectName == "" { for name := range q { if unimplementedBucketResourceNames[name] { return nullResource{} } } return b } if b.bucket == nil { fatalf(404, "NoSuchBucket", "The specified bucket does not exist") } objr := objectResource{ name: objectName, version: q.Get("versionId"), bucket: b.bucket, } for name := range q { if unimplementedObjectResourceNames[name] { return nullResource{} } } if obj := objr.bucket.objects[objr.name]; obj != nil { objr.object = obj } return objr } // nullResource has error stubs for all resource methods. type nullResource struct{} func notAllowed() interface{} { fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") return nil } func (nullResource) put(a *action) interface{} { return notAllowed() } func (nullResource) get(a *action) interface{} { return notAllowed() } func (nullResource) post(a *action) interface{} { return notAllowed() } func (nullResource) delete(a *action) interface{} { return notAllowed() } const timeFormat = "2006-01-02T15:04:05.000Z07:00" type serviceResource struct { buckets map[string]*bucket } func (serviceResource) put(a *action) interface{} { return notAllowed() } func (serviceResource) post(a *action) interface{} { return notAllowed() } func (serviceResource) delete(a *action) interface{} { return notAllowed() } // GET on an s3 service lists the buckets. // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html func (r serviceResource) get(a *action) interface{} { type respBucket struct { Name string } type response struct { Buckets []respBucket `xml:">Bucket"` } resp := response{} for _, bucketPtr := range r.buckets { bkt := respBucket{ Name: bucketPtr.name, } resp.Buckets = append(resp.Buckets, bkt) } return &resp } type bucketResource struct { name string bucket *bucket // non-nil if the bucket already exists. } // GET on a bucket lists the objects in the bucket. 
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html func (r bucketResource) get(a *action) interface{} { if r.bucket == nil { fatalf(404, "NoSuchBucket", "The specified bucket does not exist") } delimiter := a.req.Form.Get("delimiter") marker := a.req.Form.Get("marker") maxKeys := -1 if s := a.req.Form.Get("max-keys"); s != "" { i, err := strconv.Atoi(s) if err != nil || i < 0 { fatalf(400, "invalid value for max-keys: %q", s) } maxKeys = i } prefix := a.req.Form.Get("prefix") a.w.Header().Set("Content-Type", "application/xml") if a.req.Method == "HEAD" { return nil } var objs orderedObjects // first get all matching objects and arrange them in alphabetical order. for name, obj := range r.bucket.objects { if strings.HasPrefix(name, prefix) { objs = append(objs, obj) } } sort.Sort(objs) if maxKeys <= 0 { maxKeys = 1000 } resp := &s3.ListResp{ Name: r.bucket.name, Prefix: prefix, Delimiter: delimiter, Marker: marker, MaxKeys: maxKeys, } var prefixes []string for _, obj := range objs { if !strings.HasPrefix(obj.name, prefix) { continue } name := obj.name isPrefix := false if delimiter != "" { if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { name = obj.name[:len(prefix)+i+len(delimiter)] if prefixes != nil && prefixes[len(prefixes)-1] == name { continue } isPrefix = true } } if name <= marker { continue } if len(resp.Contents)+len(prefixes) >= maxKeys { resp.IsTruncated = true break } if isPrefix { prefixes = append(prefixes, name) } else { // Contents contains only keys not found in CommonPrefixes resp.Contents = append(resp.Contents, obj.s3Key()) } } resp.CommonPrefixes = prefixes return resp } // orderedObjects holds a slice of objects that can be sorted // by name. type orderedObjects []*object func (s orderedObjects) Len() int { return len(s) } func (s orderedObjects) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s orderedObjects) Less(i, j int) bool { return s[i].name < s[j].name } func (obj *object) s3Key() s3.Key { return s3.Key{ Key: obj.name, LastModified: obj.mtime.Format(timeFormat), Size: int64(len(obj.data)), ETag: fmt.Sprintf(`"%x"`, obj.checksum), // TODO StorageClass // TODO Owner } } // DELETE on a bucket deletes the bucket if it's not empty. func (r bucketResource) delete(a *action) interface{} { b := r.bucket if b == nil { fatalf(404, "NoSuchBucket", "The specified bucket does not exist") } if len(b.objects) > 0 { fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty") } delete(a.srv.buckets, b.name) return nil } // PUT on a bucket creates the bucket. 
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html func (r bucketResource) put(a *action) interface{} { var created bool if r.bucket == nil { if !validBucketName(r.name) { fatalf(400, "InvalidBucketName", "The specified bucket is not valid") } if loc := locationConstraint(a); loc == "" { fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.") } // TODO validate acl r.bucket = &bucket{ name: r.name, // TODO default acl objects: make(map[string]*object), } a.srv.buckets[r.name] = r.bucket created = true } if !created && a.srv.config.send409Conflict() { fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.") } r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl")) return nil } func (bucketResource) post(a *action) interface{} { fatalf(400, "Method", "bucket POST method not available") return nil } // validBucketName returns whether name is a valid bucket name. // Here are the rules, from: // http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html // // Can contain lowercase letters, numbers, periods (.), underscores (_), // and dashes (-). You can use uppercase letters for buckets only in the // US Standard region. // // Must start with a number or letter // // Must be between 3 and 255 characters long // // There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4) // but the real S3 server does not seem to check that rule, so we will not // check it either. // func validBucketName(name string) bool { if len(name) < 3 || len(name) > 255 { return false } r := name[0] if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') { return false } for _, r := range name { switch { case r >= '0' && r <= '9': case r >= 'a' && r <= 'z': case r == '_' || r == '-': case r == '.': default: return false } } return true } var responseParams = map[string]bool{ "content-type": true, "content-language": true, "expires": true, "cache-control": true, "content-disposition": true, "content-encoding": true, } type objectResource struct { name string version string bucket *bucket // always non-nil. object *object // may be nil. } // GET on an object gets the contents of the object. // http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html func (objr objectResource) get(a *action) interface{} { obj := objr.object if obj == nil { fatalf(404, "NoSuchKey", "The specified key does not exist.") } h := a.w.Header() // add metadata for name, d := range obj.meta { h[name] = d } // override header values in response to request parameters. for name, vals := range a.req.Form { if strings.HasPrefix(name, "response-") { name = name[len("response-"):] if !responseParams[name] { continue } h.Set(name, vals[0]) } } if r := a.req.Header.Get("Range"); r != "" { fatalf(400, "NotImplemented", "range unimplemented") } // TODO Last-Modified-Since // TODO If-Modified-Since // TODO If-Unmodified-Since // TODO If-Match // TODO If-None-Match // TODO Connection: close ?? // TODO x-amz-request-id h.Set("Content-Length", fmt.Sprint(len(obj.data))) h.Set("ETag", hex.EncodeToString(obj.checksum)) h.Set("Last-Modified", obj.mtime.Format(time.RFC1123)) if a.req.Method == "HEAD" { return nil } // TODO avoid holding the lock when writing data. _, err := a.w.Write(obj.data) if err != nil { // we can't do much except just log the fact. 
log.Printf("error writing data: %v", err) } return nil } var metaHeaders = map[string]bool{ "Content-MD5": true, "x-amz-acl": true, "Content-Type": true, "Content-Encoding": true, "Content-Disposition": true, } // PUT on an object creates the object. func (objr objectResource) put(a *action) interface{} { // TODO Cache-Control header // TODO Expires header // TODO x-amz-server-side-encryption // TODO x-amz-storage-class // TODO is this correct, or should we erase all previous metadata? obj := objr.object if obj == nil { obj = &object{ name: objr.name, meta: make(http.Header), } } var expectHash []byte if c := a.req.Header.Get("Content-MD5"); c != "" { var err error expectHash, err = hex.DecodeString(c) if err != nil || len(expectHash) != md5.Size { fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid") } } sum := md5.New() // TODO avoid holding lock while reading data. data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) if err != nil { fatalf(400, "TODO", "read error") } gotHash := sum.Sum(nil) if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received") } if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") } // PUT request has been successful - save data and metadata for key, values := range a.req.Header { key = http.CanonicalHeaderKey(key) if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { obj.meta[key] = values } } obj.data = data obj.checksum = gotHash obj.mtime = time.Now() objr.bucket.objects[objr.name] = obj return nil } func (objr objectResource) delete(a *action) interface{} { delete(objr.bucket.objects, objr.name) return nil } func (objr objectResource) post(a *action) interface{} { fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") return nil } type CreateBucketConfiguration struct { LocationConstraint string } // locationConstraint parses the request body (if present). // If there is no body, an empty string will be returned. 
func locationConstraint(a *action) string { var body bytes.Buffer if _, err := io.Copy(&body, a.req.Body); err != nil { fatalf(400, "InvalidRequest", err.Error()) } if body.Len() == 0 { return "" } var loc CreateBucketConfiguration if err := xml.NewDecoder(&body).Decode(&loc); err != nil { fatalf(400, "InvalidRequest", err.Error()) } return loc.LocationConstraint } ================================================ FILE: vendor/github.com/mitchellh/goamz/s3/sign.go ================================================ package s3 import ( "crypto/hmac" "crypto/sha1" "encoding/base64" "log" "sort" "strings" "github.com/mitchellh/goamz/aws" ) var b64 = base64.StdEncoding // ---------------------------------------------------------------------------- // S3 signing (http://goo.gl/G1LrK) var s3ParamsToSign = map[string]bool{ "acl": true, "delete": true, "location": true, "logging": true, "notification": true, "partNumber": true, "policy": true, "requestPayment": true, "torrent": true, "uploadId": true, "uploads": true, "versionId": true, "versioning": true, "versions": true, "response-content-type": true, "response-content-language": true, "response-expires": true, "response-cache-control": true, "response-content-disposition": true, "response-content-encoding": true, } func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) { var md5, ctype, date, xamz string var xamzDate bool var sarray []string // add security token if auth.Token != "" { headers["x-amz-security-token"] = []string{auth.Token} } if auth.SecretKey == "" { // no auth secret; skip signing, e.g. for public read-only buckets. return } for k, v := range headers { k = strings.ToLower(k) switch k { case "content-md5": md5 = v[0] case "content-type": ctype = v[0] case "date": if !xamzDate { date = v[0] } default: if strings.HasPrefix(k, "x-amz-") { vall := strings.Join(v, ",") sarray = append(sarray, k+":"+vall) if k == "x-amz-date" { xamzDate = true date = "" } } } } if len(sarray) > 0 { sort.StringSlice(sarray).Sort() xamz = strings.Join(sarray, "\n") + "\n" } expires := false if v, ok := params["Expires"]; ok { // Query string request authentication alternative. expires = true date = v[0] params["AWSAccessKeyId"] = []string{auth.AccessKey} } sarray = sarray[0:0] for k, v := range params { if s3ParamsToSign[k] { for _, vi := range v { if vi == "" { sarray = append(sarray, k) } else { // "When signing you do not encode these values." sarray = append(sarray, k+"="+vi) } } } } if len(sarray) > 0 { sort.StringSlice(sarray).Sort() canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") } payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath hash := hmac.New(sha1.New, []byte(auth.SecretKey)) hash.Write([]byte(payload)) signature := make([]byte, b64.EncodedLen(hash.Size())) b64.Encode(signature, hash.Sum(nil)) if expires { params["Signature"] = []string{string(signature)} } else { headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} } if debug { log.Printf("Signature payload: %q", payload) log.Printf("Signature: %q", signature) } } ================================================ FILE: vendor/github.com/trustmaster/go-aspell/README.md ================================================ # Aspell library bindings for Go GNU Aspell is a spell checking tool written in C/C++. This package provides simplified Aspell bindings for Go. It uses UTF-8 by default and encapsulates some Aspell internals. 
## Getting started First make sure aspell library and headers are installed on your system. On Debian/Ubuntu you could install it this way: ``` sudo apt-get install aspell libaspell-dev ``` It you need some more dictionaries you can install them like this: ``` sudo apt-get install aspell-ua aspell-se ``` Then you can install the package using the Go tool: ``` go get github.com/trustmaster/go-aspell ``` ## Usage Here is a simple spell checker program using the aspell package: ```go package main import ( "github.com/trustmaster/go-aspell" "fmt" "os" "strings" ) func main() { // Get a word from cmd line arguments if len(os.Args) != 2 { fmt.Print("Usage: aspell_example word\n") return } word := os.Args[1] // Initialize the speller speller, err := aspell.NewSpeller(map[string]string{ "lang": "en_US", }) if err != nil { fmt.Errorf("Error: %s", err.Error()) return } defer speller.Delete() // Check and suggest if speller.Check(word) { fmt.Print("OK\n") } else { fmt.Printf("Incorrect word, suggestions: %s\n", strings.Join(speller.Suggest(word), ", ")) } } ``` For more information see [aspell_test.go](https://github.com/trustmaster/go-aspell/blob/master/aspell_test.go) file and use the godoc tool: ``` godoc github.com/trustmaster/go-aspell ``` ## License Copyright (c) 2012, Vladimir Sibirov All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/trustmaster/go-aspell/aspell.go ================================================ // Package aspell provides simplified bindings to GNU Aspell spell checking library. package aspell /* #cgo LDFLAGS: -laspell #include #include "aspell.h" */ import "C" import ( "errors" "unsafe" ) // Speller is a type that encapsulates Aspell internals. type Speller struct { config *C.AspellConfig speller *C.AspellSpeller } // NewSpeller creates a new speller instance with configuration options // given as a map. At least the language option should be specified // (see example below). // // The returned value is a speller struct. The second returned value // contains error data in case of error or nil if NewSpeller succeeded. 
// // In the most common case you would like to pass the language option // which accepts two letter ISO 639 language code and an optional // two letter ISO 3166 country code after a dash or underscore: // // opts := map[string] string { // "lang": "en_US", // American English // } // speller, err := aspell.NewSpeller(opts) // if err != nil { // panic("Aspell error: " + err.Error()) // } // defer speller.Delete() // // See available options at http://aspell.net/man-html/The-Options.html // // Because aspell package is a binding to Aspell C library, memory // allocated by NewSpeller() call has to be disposed explicitly. // This is why the above example contains a deferred call to Delete(). func NewSpeller(options map[string]string) (Speller, error) { var s Speller // Pass configuration options s.config = C.new_aspell_config() if _, hasEnc := options["encoding"]; !hasEnc { options["encoding"] = "utf-8" } for k, v := range options { optName := C.CString(k) optValue := C.CString(v) res := C.aspell_config_replace(s.config, optName, optValue) C.free(unsafe.Pointer(optName)) C.free(unsafe.Pointer(optValue)) if res == 0 { msg := C.aspell_config_error_message(s.config) err := errors.New(C.GoString(msg)) C.free(unsafe.Pointer(msg)) return s, err } } // Attempt to initialize the speller var probErr *C.AspellCanHaveError probErr = C.new_aspell_speller(s.config) C.delete_aspell_config(s.config) if C.aspell_error_number(probErr) != 0 { msg := C.aspell_error_message(probErr) err := errors.New(C.GoString(msg)) C.free(unsafe.Pointer(msg)) C.delete_aspell_can_have_error(probErr) return s, err } // Successful speller initialization s.speller = C.to_aspell_speller(probErr) s.config = C.aspell_speller_config(s.speller) return s, nil } // Config returns current Aspell configuration option value for the speller. // It returns nil in case of error. // See available options at http://aspell.net/man-html/The-Options.html func (s Speller) Config(name string) string { cName := C.CString(name) cVal := C.aspell_config_retrieve(s.config, cName) val := C.GoString(cVal) C.free(unsafe.Pointer(cName)) C.free(unsafe.Pointer(cVal)) return val } // Check looks the word up in the spell checker dictionary // and returns true if the word is found there or false // otherwise. func (s Speller) Check(word string) bool { cword := C.CString(word) defer C.free(unsafe.Pointer(cword)) res := C.aspell_speller_check(s.speller, cword, -1) return res != 0 } // Delete frees memory allocated by Aspell for the speller. func (s Speller) Delete() { // For some reason this breaks everything // if s.speller != nil { // C.delete_aspell_speller(s.speller) // } // s.config is deleted automatically } // wordListToSlice converts Aspell word list into Go slice. func wordListToSlice(list *C.AspellWordList) []string { if list == nil { return nil } count := int(C.aspell_word_list_size(list)) result := make([]string, count) elems := C.aspell_word_list_elements(list) for i := 0; i < count; i++ { word := C.aspell_string_enumeration_next(elems) if word == nil { break } result[i] = C.GoString(word) } C.delete_aspell_string_enumeration(elems) return result } // Suggest returns a slice of possible suggestions for the given word. // Nil is returned on error. 
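// A hypothetical sketch (the misspelled word is an assumption for
// illustration):
//
//	for _, candidate := range speller.Suggest("helo") {
//		fmt.Println(candidate)
//	}
//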
func (s Speller) Suggest(word string) []string { cword := C.CString(word) defer C.free(unsafe.Pointer(cword)) suggestions := C.aspell_speller_suggest(s.speller, cword, -1) return wordListToSlice(suggestions) } // Replace saves a replacement pair to the spell checker so that it would // get higher probability on next Suggest call. // Returns true on success or false on error. func (s Speller) Replace(misspelled, correct string) bool { cmis := C.CString(misspelled) defer C.free(unsafe.Pointer(cmis)) ccor := C.CString(correct) defer C.free(unsafe.Pointer(ccor)) ret := C.aspell_speller_store_replacement(s.speller, cmis, -1, ccor, -1) return ret != -1 } // MainWordList returns the main word list used by the speller. func (s Speller) MainWordList() ([]string, error) { list := C.aspell_speller_main_word_list(s.speller) if list == nil { return nil, errors.New("Failed getting the main word list") } return wordListToSlice(list), nil } // Dict represents Aspell dictionary info. type Dict struct { name string code string jargon string size string module string } // Dicts returns the list of available aspell dictionaries. func Dicts() []Dict { config := C.new_aspell_config() dlist := C.get_aspell_dict_info_list(config) C.delete_aspell_config(config) count := int(C.aspell_dict_info_list_size(dlist)) result := make([]Dict, count) dels := C.aspell_dict_info_list_elements(dlist) for i := 0; i < count; i++ { entry := C.aspell_dict_info_enumeration_next(dels) if entry == nil { break } result[i] = Dict{ name: C.GoString(entry.name), code: C.GoString(entry.code), jargon: C.GoString(entry.jargon), size: C.GoString(entry.size_str), module: C.GoString(entry.module.name), } } C.delete_aspell_dict_info_enumeration(dels) return result } ================================================ FILE: vendor/github.com/vaughan0/go-ini/LICENSE ================================================ Copyright (c) 2013 Vaughan Newton Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: vendor/github.com/vaughan0/go-ini/README.md ================================================ go-ini ====== INI parsing library for Go (golang). View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini). 
Usage ----- Parse an INI file: ```go import "github.com/vaughan0/go-ini" file, err := ini.LoadFile("myfile.ini") ``` Get data from the parsed file: ```go name, ok := file.Get("person", "name") if !ok { panic("'name' variable missing from 'person' section") } ``` Iterate through values in a section: ```go for key, value := range file["mysection"] { fmt.Printf("%s => %s\n", key, value) } ``` Iterate through sections in a file: ```go for name, section := range file { fmt.Printf("Section name: %s\n", name) } ``` File Format ----------- INI files are parsed by go-ini line-by-line. Each line may be one of the following: * A section definition: [section-name] * A property: key = value * A comment: #blahblah _or_ ;blahblah * Blank. The line will be ignored. Properties defined before any section headers are placed in the default section, which has the empty string as it's key. Example: ```ini # I am a comment ; So am I! [apples] colour = red or green shape = applish [oranges] shape = square colour = blue ``` ================================================ FILE: vendor/github.com/vaughan0/go-ini/ini.go ================================================ // Package ini provides functions for parsing INI configuration files. package ini import ( "bufio" "fmt" "io" "os" "regexp" "strings" ) var ( sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) ) // ErrSyntax is returned when there is a syntax error in an INI file. type ErrSyntax struct { Line int Source string // The contents of the erroneous line, without leading or trailing whitespace } func (e ErrSyntax) Error() string { return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) } // A File represents a parsed INI file. type File map[string]Section // A Section represents a single section of an INI file. type Section map[string]string // Returns a named Section. A Section will be created if one does not already exist for the given name. func (f File) Section(name string) Section { section := f[name] if section == nil { section = make(Section) f[name] = section } return section } // Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. func (f File) Get(section, key string) (value string, ok bool) { if s := f[section]; s != nil { value, ok = s[key] } return } // Loads INI data from a reader and stores the data in the File. func (f File) Load(in io.Reader) (err error) { bufin, ok := in.(*bufio.Reader) if !ok { bufin = bufio.NewReader(in) } return parseFile(bufin, f) } // Loads INI data from a named file and stores the data in the File. 
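// A minimal, hypothetical sketch (the file name is assumed):
//
//	f := make(File)
//	if err := f.LoadFile("app.ini"); err != nil {
//		// handle the error
//	}
//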
func (f File) LoadFile(file string) (err error) { in, err := os.Open(file) if err != nil { return } defer in.Close() return f.Load(in) } func parseFile(in *bufio.Reader, file File) (err error) { section := "" lineNum := 0 for done := false; !done; { var line string if line, err = in.ReadString('\n'); err != nil { if err == io.EOF { done = true } else { return } } lineNum++ line = strings.TrimSpace(line) if len(line) == 0 { // Skip blank lines continue } if line[0] == ';' || line[0] == '#' { // Skip comments continue } if groups := assignRegex.FindStringSubmatch(line); groups != nil { key, val := groups[1], groups[2] key, val = strings.TrimSpace(key), strings.TrimSpace(val) file.Section(section)[key] = val } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { name := strings.TrimSpace(groups[1]) section = name // Create the section if it does not exist file.Section(section) } else { return ErrSyntax{lineNum, line} } } return nil } // Loads and returns a File from a reader. func Load(in io.Reader) (File, error) { file := make(File) err := file.Load(in) return file, err } // Loads and returns an INI File from a file on disk. func LoadFile(filename string) (File, error) { file := make(File) err := file.LoadFile(filename) return file, err } ================================================ FILE: vendor/github.com/vaughan0/go-ini/test.ini ================================================ [default] stuff = things ================================================ FILE: vendor/golang.org/x/crypto/LICENSE ================================================ Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/golang.org/x/crypto/PATENTS ================================================ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. 
Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/terminal.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package terminal import ( "bytes" "io" "sync" "unicode/utf8" ) // EscapeCodes contains escape sequences that can be written to the terminal in // order to achieve different styles of text. type EscapeCodes struct { // Foreground colors Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte // Reset all attributes Reset []byte } var vt100EscapeCodes = EscapeCodes{ Black: []byte{keyEscape, '[', '3', '0', 'm'}, Red: []byte{keyEscape, '[', '3', '1', 'm'}, Green: []byte{keyEscape, '[', '3', '2', 'm'}, Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, Blue: []byte{keyEscape, '[', '3', '4', 'm'}, Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, White: []byte{keyEscape, '[', '3', '7', 'm'}, Reset: []byte{keyEscape, '[', '0', 'm'}, } // Terminal contains the state for running a VT100 terminal that is capable of // reading lines of input. type Terminal struct { // AutoCompleteCallback, if non-null, is called for each keypress with // the full input line and the current position of the cursor (in // bytes, as an index into |line|). If it returns ok=false, the key // press is processed normally. Otherwise it returns a replacement line // and the new cursor position. AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) // Escape contains a pointer to the escape codes for this terminal. // It's always a valid pointer, although the escape codes themselves // may be empty if the terminal doesn't support them. Escape *EscapeCodes // lock protects the terminal and the state in this object from // concurrent processing of a key press and a Write() call. lock sync.Mutex c io.ReadWriter prompt []rune // line is the current line being entered. line []rune // pos is the logical position of the cursor in line pos int // echo is true if local echo is enabled echo bool // pasteActive is true iff there is a bracketed paste operation in // progress. pasteActive bool // cursorX contains the current X value of the cursor where the left // edge is 0. 
cursorY contains the row number where the first row of // the current line is 0. cursorX, cursorY int // maxLine is the greatest value of cursorY so far. maxLine int termWidth, termHeight int // outBuf contains the terminal data to be sent. outBuf []byte // remainder contains the remainder of any partial key sequences after // a read. It aliases into inBuf. remainder []byte inBuf [256]byte // history contains previously entered commands so that they can be // accessed with the up and down keys. history stRingBuffer // historyIndex stores the currently accessed history entry, where zero // means the immediately previous entry. historyIndex int // When navigating up and down the history it's possible to return to // the incomplete, initial line. That value is stored in // historyPending. historyPending string } // NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is // a local terminal, that terminal must first have been put into raw mode. // prompt is a string that is written at the start of each input line (i.e. // "> "). func NewTerminal(c io.ReadWriter, prompt string) *Terminal { return &Terminal{ Escape: &vt100EscapeCodes, c: c, prompt: []rune(prompt), termWidth: 80, termHeight: 24, echo: true, historyIndex: -1, } } const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota keyUp keyDown keyLeft keyRight keyAltLeft keyAltRight keyHome keyEnd keyDeleteWord keyDeleteLine keyClearScreen keyPasteStart keyPasteEnd ) var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} // bytesToKey tries to parse a key sequence from b. If successful, it returns // the key and the remainder of the input. Otherwise it returns utf8.RuneError. func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { if len(b) == 0 { return utf8.RuneError, nil } if !pasteActive { switch b[0] { case 1: // ^A return keyHome, b[1:] case 5: // ^E return keyEnd, b[1:] case 8: // ^H return keyBackspace, b[1:] case 11: // ^K return keyDeleteLine, b[1:] case 12: // ^L return keyClearScreen, b[1:] case 23: // ^W return keyDeleteWord, b[1:] } } if b[0] != keyEscape { if !utf8.FullRune(b) { return utf8.RuneError, b } r, l := utf8.DecodeRune(b) return r, b[l:] } if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { switch b[2] { case 'A': return keyUp, b[3:] case 'B': return keyDown, b[3:] case 'C': return keyRight, b[3:] case 'D': return keyLeft, b[3:] case 'H': return keyHome, b[3:] case 'F': return keyEnd, b[3:] } } if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { switch b[5] { case 'C': return keyAltRight, b[6:] case 'D': return keyAltLeft, b[6:] } } if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { return keyPasteStart, b[6:] } if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { return keyPasteEnd, b[6:] } // If we get here then we have a key that we don't recognise, or a // partial sequence. It's not clear how one should find the end of a // sequence without knowing them all, but it seems that [a-zA-Z~] only // appears at the end of a sequence. for i, c := range b[0:] { if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { return keyUnknown, b[i+1:] } } return utf8.RuneError, b } // queue appends data to the end of t.outBuf func (t *Terminal) queue(data []rune) { t.outBuf = append(t.outBuf, []byte(string(data))...) 
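// Note: the []byte(string(...)) conversion UTF-8-encodes the runes before they are buffered.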
} var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} var space = []rune{' '} func isPrintable(key rune) bool { isInSurrogateArea := key >= 0xd800 && key <= 0xdbff return key >= 32 && !isInSurrogateArea } // moveCursorToPos appends data to t.outBuf which will move the cursor to the // given, logical position in the text. func (t *Terminal) moveCursorToPos(pos int) { if !t.echo { return } x := visualLength(t.prompt) + pos y := x / t.termWidth x = x % t.termWidth up := 0 if y < t.cursorY { up = t.cursorY - y } down := 0 if y > t.cursorY { down = y - t.cursorY } left := 0 if x < t.cursorX { left = t.cursorX - x } right := 0 if x > t.cursorX { right = x - t.cursorX } t.cursorX = x t.cursorY = y t.move(up, down, left, right) } func (t *Terminal) move(up, down, left, right int) { movement := make([]rune, 3*(up+down+left+right)) m := movement for i := 0; i < up; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'A' m = m[3:] } for i := 0; i < down; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'B' m = m[3:] } for i := 0; i < left; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'D' m = m[3:] } for i := 0; i < right; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'C' m = m[3:] } t.queue(movement) } func (t *Terminal) clearLineToRight() { op := []rune{keyEscape, '[', 'K'} t.queue(op) } const maxLineLength = 4096 func (t *Terminal) setLine(newLine []rune, newPos int) { if t.echo { t.moveCursorToPos(0) t.writeLine(newLine) for i := len(newLine); i < len(t.line); i++ { t.writeLine(space) } t.moveCursorToPos(newPos) } t.line = newLine t.pos = newPos } func (t *Terminal) advanceCursor(places int) { t.cursorX += places t.cursorY += t.cursorX / t.termWidth if t.cursorY > t.maxLine { t.maxLine = t.cursorY } t.cursorX = t.cursorX % t.termWidth if places > 0 && t.cursorX == 0 { // Normally terminals will advance the current position // when writing a character. But that doesn't happen // for the last character in a line. However, when // writing a character (except a new line) that causes // a line wrap, the position will be advanced two // places. // // So, if we are stopping at the end of a line, we // need to write a newline so that our cursor can be // advanced to the next line. t.outBuf = append(t.outBuf, '\n') } } func (t *Terminal) eraseNPreviousChars(n int) { if n == 0 { return } if t.pos < n { n = t.pos } t.pos -= n t.moveCursorToPos(t.pos) copy(t.line[t.pos:], t.line[n+t.pos:]) t.line = t.line[:len(t.line)-n] if t.echo { t.writeLine(t.line[t.pos:]) for i := 0; i < n; i++ { t.queue(space) } t.advanceCursor(n) t.moveCursorToPos(t.pos) } } // countToLeftWord returns then number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { return 0 } pos := t.pos - 1 for pos > 0 { if t.line[pos] != ' ' { break } pos-- } for pos > 0 { if t.line[pos] == ' ' { pos++ break } pos-- } return t.pos - pos } // countToRightWord returns then number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos for pos < len(t.line) { if t.line[pos] == ' ' { break } pos++ } for pos < len(t.line) { if t.line[pos] != ' ' { break } pos++ } return pos - t.pos } // visualLength returns the number of visible glyphs in s. 
func visualLength(runes []rune) int { inEscapeSeq := false length := 0 for _, r := range runes { switch { case inEscapeSeq: if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { inEscapeSeq = false } case r == '\x1b': inEscapeSeq = true default: length++ } } return length } // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { if t.pasteActive && key != keyEnter { t.addKeyToLine(key) return } switch key { case keyBackspace: if t.pos == 0 { return } t.eraseNPreviousChars(1) case keyAltLeft: // move left by a word. t.pos -= t.countToLeftWord() t.moveCursorToPos(t.pos) case keyAltRight: // move right by a word. t.pos += t.countToRightWord() t.moveCursorToPos(t.pos) case keyLeft: if t.pos == 0 { return } t.pos-- t.moveCursorToPos(t.pos) case keyRight: if t.pos == len(t.line) { return } t.pos++ t.moveCursorToPos(t.pos) case keyHome: if t.pos == 0 { return } t.pos = 0 t.moveCursorToPos(t.pos) case keyEnd: if t.pos == len(t.line) { return } t.pos = len(t.line) t.moveCursorToPos(t.pos) case keyUp: entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) if !ok { return "", false } if t.historyIndex == -1 { t.historyPending = string(t.line) } t.historyIndex++ runes := []rune(entry) t.setLine(runes, len(runes)) case keyDown: switch t.historyIndex { case -1: return case 0: runes := []rune(t.historyPending) t.setLine(runes, len(runes)) t.historyIndex-- default: entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) if ok { t.historyIndex-- runes := []rune(entry) t.setLine(runes, len(runes)) } } case keyEnter: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) ok = true t.line = t.line[:0] t.pos = 0 t.cursorX = 0 t.cursorY = 0 t.maxLine = 0 case keyDeleteWord: // Delete zero or more spaces and then one or more characters. t.eraseNPreviousChars(t.countToLeftWord()) case keyDeleteLine: // Delete everything from the current cursor position to the // end of line. for i := t.pos; i < len(t.line); i++ { t.queue(space) t.advanceCursor(1) } t.line = t.line[:t.pos] t.moveCursorToPos(t.pos) case keyCtrlD: // Erase the character under the current position. // The EOF case when the line is empty is handled in // readLine(). if t.pos < len(t.line) { t.pos++ t.eraseNPreviousChars(1) } case keyCtrlU: t.eraseNPreviousChars(t.pos) case keyClearScreen: // Erases the screen and moves the cursor to the home position. t.queue([]rune("\x1b[2J\x1b[H")) t.queue(t.prompt) t.cursorX, t.cursorY = 0, 0 t.advanceCursor(visualLength(t.prompt)) t.setLine(t.line, t.pos) default: if t.AutoCompleteCallback != nil { prefix := string(t.line[:t.pos]) suffix := string(t.line[t.pos:]) t.lock.Unlock() newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) t.lock.Lock() if completeOk { t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) return } } if !isPrintable(key) { return } if len(t.line) == maxLineLength { return } t.addKeyToLine(key) } return } // addKeyToLine inserts the given key at the current position in the current // line. 
func (t *Terminal) addKeyToLine(key rune) { if len(t.line) == cap(t.line) { newLine := make([]rune, len(t.line), 2*(1+len(t.line))) copy(newLine, t.line) t.line = newLine } t.line = t.line[:len(t.line)+1] copy(t.line[t.pos+1:], t.line[t.pos:]) t.line[t.pos] = key if t.echo { t.writeLine(t.line[t.pos:]) } t.pos++ t.moveCursorToPos(t.pos) } func (t *Terminal) writeLine(line []rune) { for len(line) != 0 { remainingOnLine := t.termWidth - t.cursorX todo := len(line) if todo > remainingOnLine { todo = remainingOnLine } t.queue(line[:todo]) t.advanceCursor(visualLength(line[:todo])) line = line[todo:] } } func (t *Terminal) Write(buf []byte) (n int, err error) { t.lock.Lock() defer t.lock.Unlock() if t.cursorX == 0 && t.cursorY == 0 { // This is the easy case: there's nothing on the screen that we // have to move out of the way. return t.c.Write(buf) } // We have a prompt and possibly user input on the screen. We // have to clear it first. t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) t.cursorX = 0 t.clearLineToRight() for t.cursorY > 0 { t.move(1 /* up */, 0, 0, 0) t.cursorY-- t.clearLineToRight() } if _, err = t.c.Write(t.outBuf); err != nil { return } t.outBuf = t.outBuf[:0] if n, err = t.c.Write(buf); err != nil { return } t.writeLine(t.prompt) if t.echo { t.writeLine(t.line) } t.moveCursorToPos(t.pos) if _, err = t.c.Write(t.outBuf); err != nil { return } t.outBuf = t.outBuf[:0] return } // ReadPassword temporarily changes the prompt and reads a password, without // echo, from the terminal. func (t *Terminal) ReadPassword(prompt string) (line string, err error) { t.lock.Lock() defer t.lock.Unlock() oldPrompt := t.prompt t.prompt = []rune(prompt) t.echo = false line, err = t.readLine() t.prompt = oldPrompt t.echo = true return } // ReadLine returns a line of input from the terminal. func (t *Terminal) ReadLine() (line string, err error) { t.lock.Lock() defer t.lock.Unlock() return t.readLine() } func (t *Terminal) readLine() (line string, err error) { // t.lock must be held at this point if t.cursorX == 0 && t.cursorY == 0 { t.writeLine(t.prompt) t.c.Write(t.outBuf) t.outBuf = t.outBuf[:0] } lineIsPasted := t.pasteActive for { rest := t.remainder lineOk := false for !lineOk { var key rune key, rest = bytesToKey(rest, t.pasteActive) if key == utf8.RuneError { break } if !t.pasteActive { if key == keyCtrlD { if len(t.line) == 0 { return "", io.EOF } } if key == keyPasteStart { t.pasteActive = true if len(t.line) == 0 { lineIsPasted = true } continue } } else if key == keyPasteEnd { t.pasteActive = false continue } if !t.pasteActive { lineIsPasted = false } line, lineOk = t.handleKey(key) } if len(rest) > 0 { n := copy(t.inBuf[:], rest) t.remainder = t.inBuf[:n] } else { t.remainder = nil } t.c.Write(t.outBuf) t.outBuf = t.outBuf[:0] if lineOk { if t.echo { t.historyIndex = -1 t.history.Add(line) } if lineIsPasted { err = ErrPasteIndicator } return } // t.remainder is a slice at the beginning of t.inBuf // containing a partial key sequence readBuf := t.inBuf[len(t.remainder):] var n int t.lock.Unlock() n, err = t.c.Read(readBuf) t.lock.Lock() if err != nil { return } t.remainder = t.inBuf[:n+len(t.remainder)] } panic("unreachable") // for Go 1.0. } // SetPrompt sets the prompt to be used when reading subsequent lines. func (t *Terminal) SetPrompt(prompt string) { t.lock.Lock() defer t.lock.Unlock() t.prompt = []rune(prompt) } func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { // Move cursor to column zero at the start of the line. 
t.move(t.cursorY, 0, t.cursorX, 0) t.cursorX, t.cursorY = 0, 0 t.clearLineToRight() for t.cursorY < numPrevLines { // Move down a line t.move(0, 1, 0, 0) t.cursorY++ t.clearLineToRight() } // Move back to beginning. t.move(t.cursorY, 0, 0, 0) t.cursorX, t.cursorY = 0, 0 t.queue(t.prompt) t.advanceCursor(visualLength(t.prompt)) t.writeLine(t.line) t.moveCursorToPos(t.pos) } func (t *Terminal) SetSize(width, height int) error { t.lock.Lock() defer t.lock.Unlock() if width == 0 { width = 1 } oldWidth := t.termWidth t.termWidth, t.termHeight = width, height switch { case width == oldWidth: // If the width didn't change then nothing else needs to be // done. return nil case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: // If there is nothing on current line and no prompt printed, // just do nothing return nil case width < oldWidth: // Some terminals (e.g. xterm) will truncate lines that were // too long when shinking. Others, (e.g. gnome-terminal) will // attempt to wrap them. For the former, repainting t.maxLine // works great, but that behaviour goes badly wrong in the case // of the latter because they have doubled every full line. // We assume that we are working on a terminal that wraps lines // and adjust the cursor position based on every previous line // wrapping and turning into two. This causes the prompt on // xterms to move upwards, which isn't great, but it avoids a // huge mess with gnome-terminal. if t.cursorX >= t.termWidth { t.cursorX = t.termWidth - 1 } t.cursorY *= 2 t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) case width > oldWidth: // If the terminal expands then our position calculations will // be wrong in the future because we think the cursor is // |t.pos| chars into the string, but there will be a gap at // the end of any wrapped line. // // But the position will actually be correct until we move, so // we can move back to the beginning and repaint everything. t.clearAndRepaintLinePlusNPrevious(t.maxLine) } _, err := t.c.Write(t.outBuf) t.outBuf = t.outBuf[:0] return err } type pasteIndicatorError struct{} func (pasteIndicatorError) Error() string { return "terminal: ErrPasteIndicator not correctly handled" } // ErrPasteIndicator may be returned from ReadLine as the error, in addition // to valid line data. It indicates that bracketed paste mode is enabled and // that the returned line consists only of pasted data. Programs may wish to // interpret pasted data more literally than typed data. var ErrPasteIndicator = pasteIndicatorError{} // SetBracketedPasteMode requests that the terminal bracket paste operations // with markers. Not all terminals support this but, if it is supported, then // enabling this mode will stop any autocomplete callback from running due to // pastes. Additionally, any lines that are completely pasted will be returned // from ReadLine with the error set to ErrPasteIndicator. func (t *Terminal) SetBracketedPasteMode(on bool) { if on { io.WriteString(t.c, "\x1b[?2004h") } else { io.WriteString(t.c, "\x1b[?2004l") } } // stRingBuffer is a ring buffer of strings. type stRingBuffer struct { // entries contains max elements. entries []string max int // head contains the index of the element most recently added to the ring. head int // size contains the number of elements in the ring. 
size int } func (s *stRingBuffer) Add(a string) { if s.entries == nil { const defaultNumEntries = 100 s.entries = make([]string, defaultNumEntries) s.max = defaultNumEntries } s.head = (s.head + 1) % s.max s.entries[s.head] = a if s.size < s.max { s.size++ } } // NthPreviousEntry returns the value passed to the nth previous call to Add. // If n is zero then the immediately prior value is returned, if one, then the // next most recent, and so on. If such an element doesn't exist then ok is // false. func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { if n >= s.size { return "", false } index := s.head - n if index < 0 { index += s.max } return s.entries[index], true } ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd linux,!appengine netbsd openbsd // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. // // Putting a terminal into raw mode is the most common requirement: // // oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } // defer terminal.Restore(0, oldState) package terminal import ( "io" "syscall" "unsafe" ) // State contains the state of a terminal. type State struct { termios syscall.Termios } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd int) bool { var termios syscall.Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd int) (*State, error) { var oldState State if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { return nil, err } return &oldState, nil } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { var oldState State if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { return nil, err } return &oldState, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, state *State) error { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0) return err } // GetSize returns the dimensions of the given terminal. 
func GetSize(fd int) (width, height int, err error) { var dimensions [4]uint16 if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { return -1, -1, err } return int(dimensions[1]), int(dimensions[0]), nil } // ReadPassword reads a line of input from a terminal without local echo. This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. func ReadPassword(fd int) ([]byte, error) { var oldState syscall.Termios if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { return nil, err } newState := oldState newState.Lflag &^= syscall.ECHO newState.Lflag |= syscall.ICANON | syscall.ISIG newState.Iflag |= syscall.ICRNL if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { return nil, err } defer func() { syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) }() var buf [16]byte var ret []byte for { n, err := syscall.Read(fd, buf[:]) if err != nil { return nil, err } if n == 0 { if len(ret) == 0 { return nil, io.EOF } break } if buf[n-1] == '\n' { n-- } ret = append(ret, buf[:n]...) if n < len(buf) { break } } return ret, nil } ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go ================================================ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd netbsd openbsd package terminal import "syscall" const ioctlReadTermios = syscall.TIOCGETA const ioctlWriteTermios = syscall.TIOCSETA ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_linux.go ================================================ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package terminal // These constants are declared here, rather than importing // them from the syscall package as some syscall packages, even // on linux, for example gccgo, do not declare them. const ioctlReadTermios = 0x5401 // syscall.TCGETS const ioctlWriteTermios = 0x5402 // syscall.TCSETS ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_windows.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build windows // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. 
// // Putting a terminal into raw mode is the most common requirement: // // oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } // defer terminal.Restore(0, oldState) package terminal import ( "io" "syscall" "unsafe" ) const ( enableLineInput = 2 enableEchoInput = 4 enableProcessedInput = 1 enableWindowInput = 8 enableMouseInput = 16 enableInsertMode = 32 enableQuickEditMode = 64 enableExtendedFlags = 128 enableAutoPosition = 256 enableProcessedOutput = 1 enableWrapAtEolOutput = 2 ) var kernel32 = syscall.NewLazyDLL("kernel32.dll") var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") procSetConsoleMode = kernel32.NewProc("SetConsoleMode") procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") ) type ( short int16 word uint16 coord struct { x short y short } smallRect struct { left short top short right short bottom short } consoleScreenBufferInfo struct { size coord cursorPosition coord attributes word window smallRect maximumWindowSize coord } ) type State struct { mode uint32 } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd int) bool { var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd int) (*State, error) { var st uint32 _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) if e != 0 { return nil, error(e) } st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) if e != 0 { return nil, error(e) } return &State{st}, nil } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { var st uint32 _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) if e != 0 { return nil, error(e) } return &State{st}, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, state *State) error { _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) return err } // GetSize returns the dimensions of the given terminal. func GetSize(fd int) (width, height int, err error) { var info consoleScreenBufferInfo _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) if e != 0 { return 0, 0, error(e) } return int(info.size.x), int(info.size.y), nil } // ReadPassword reads a line of input from a terminal without local echo. This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. 
func ReadPassword(fd int) ([]byte, error) { var st uint32 _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) if e != 0 { return nil, error(e) } old := st st &^= (enableEchoInput) st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) if e != 0 { return nil, error(e) } defer func() { syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) }() var buf [16]byte var ret []byte for { n, err := syscall.Read(syscall.Handle(fd), buf[:]) if err != nil { return nil, err } if n == 0 { if len(ret) == 0 { return nil, io.EOF } break } if buf[n-1] == '\n' { n-- } if n > 0 && buf[n-1] == '\r' { n-- } ret = append(ret, buf[:n]...) if n < len(buf) { break } } return ret, nil } ================================================ FILE: vendor/golang.org/x/net/LICENSE ================================================ Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/golang.org/x/net/PATENTS ================================================ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. 
If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ================================================ FILE: vendor/golang.org/x/net/context/context.go ================================================ // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, // cancelation signals, and other request-scoped values across API boundaries // and between processes. // // Incoming requests to a server should create a Context, and outgoing calls to // servers should accept a Context. The chain of function calls between must // propagate the Context, optionally replacing it with a modified copy created // using WithDeadline, WithTimeout, WithCancel, or WithValue. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context // explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // // Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and // APIs, not for passing optional parameters to functions. // // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // // See http://blog.golang.org/context for example code for a server that uses // Contexts. package context import ( "errors" "fmt" "sync" "time" ) // A Context carries a deadline, a cancelation signal, and other values across // API boundaries. // // Context's methods may be called by multiple goroutines simultaneously. type Context interface { // Deadline returns the time when work done on behalf of this context // should be canceled. Deadline returns ok==false when no deadline is // set. Successive calls to Deadline return the same results. Deadline() (deadline time.Time, ok bool) // Done returns a channel that's closed when work done on behalf of this // context should be canceled. Done may return nil if this context can // never be canceled. Successive calls to Done return the same value. // // WithCancel arranges for Done to be closed when cancel is called; // WithDeadline arranges for Done to be closed when the deadline // expires; WithTimeout arranges for Done to be closed when the timeout // elapses. // // Done is provided for use in select statements: // // // Stream generates values with DoSomething and sends them to out // // until DoSomething returns an error or ctx.Done is closed. 
// func Stream(ctx context.Context, out <-chan Value) error { // for { // v, err := DoSomething(ctx) // if err != nil { // return err // } // select { // case <-ctx.Done(): // return ctx.Err() // case out <- v: // } // } // } // // See http://blog.golang.org/pipelines for more examples of how to use // a Done channel for cancelation. Done() <-chan struct{} // Err returns a non-nil error value after Done is closed. Err returns // Canceled if the context was canceled or DeadlineExceeded if the // context's deadline passed. No other values for Err are defined. // After Done is closed, successive calls to Err return the same value. Err() error // Value returns the value associated with this context for key, or nil // if no value is associated with key. Successive calls to Value with // the same key returns the same result. // // Use context values only for request-scoped data that transits // processes and API boundaries, not for passing optional parameters to // functions. // // A key identifies a specific value in a Context. Functions that wish // to store values in Context typically allocate a key in a global // variable then use that key as the argument to context.WithValue and // Context.Value. A key can be any type that supports equality; // packages should define keys as an unexported type to avoid // collisions. // // Packages that define a Context key should provide type-safe accessors // for the values stores using that key: // // // Package user defines a User type that's stored in Contexts. // package user // // import "golang.org/x/net/context" // // // User is the type of value stored in the Contexts. // type User struct {...} // // // key is an unexported type for keys defined in this package. // // This prevents collisions with keys defined in other packages. // type key int // // // userKey is the key for user.User values in Contexts. It is // // unexported; clients use user.NewContext and user.FromContext // // instead of using this key directly. // var userKey key = 0 // // // NewContext returns a new Context that carries value u. // func NewContext(ctx context.Context, u *User) context.Context { // return context.WithValue(ctx, userKey, u) // } // // // FromContext returns the User value stored in ctx, if any. // func FromContext(ctx context.Context) (*User, bool) { // u, ok := ctx.Value(userKey).(*User) // return u, ok // } Value(key interface{}) interface{} } // Canceled is the error returned by Context.Err when the context is canceled. var Canceled = errors.New("context canceled") // DeadlineExceeded is the error returned by Context.Err when the context's // deadline passes. var DeadlineExceeded = errors.New("context deadline exceeded") // An emptyCtx is never canceled, has no values, and has no deadline. It is not // struct{}, since vars of this type must have distinct addresses. type emptyCtx int func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { return } func (*emptyCtx) Done() <-chan struct{} { return nil } func (*emptyCtx) Err() error { return nil } func (*emptyCtx) Value(key interface{}) interface{} { return nil } func (e *emptyCtx) String() string { switch e { case background: return "context.Background" case todo: return "context.TODO" } return "unknown empty Context" } var ( background = new(emptyCtx) todo = new(emptyCtx) ) // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. 
It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming // requests. func Background() Context { return background } // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it's is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine // whether Contexts are propagated correctly in a program. func TODO() Context { return todo } // A CancelFunc tells an operation to abandon its work. // A CancelFunc does not wait for the work to stop. // After the first call, subsequent calls to a CancelFunc do nothing. type CancelFunc func() // WithCancel returns a copy of parent with a new Done channel. The returned // context's Done channel is closed when the returned cancel function is called // or when the parent context's Done channel is closed, whichever happens first. // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete. func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { c := newCancelCtx(parent) propagateCancel(parent, &c) return &c, func() { c.cancel(true, Canceled) } } // newCancelCtx returns an initialized cancelCtx. func newCancelCtx(parent Context) cancelCtx { return cancelCtx{ Context: parent, done: make(chan struct{}), } } // propagateCancel arranges for child to be canceled when parent is. func propagateCancel(parent Context, child canceler) { if parent.Done() == nil { return // parent is never canceled } if p, ok := parentCancelCtx(parent); ok { p.mu.Lock() if p.err != nil { // parent has already been canceled child.cancel(false, p.err) } else { if p.children == nil { p.children = make(map[canceler]bool) } p.children[child] = true } p.mu.Unlock() } else { go func() { select { case <-parent.Done(): child.cancel(false, parent.Err()) case <-child.Done(): } }() } } // parentCancelCtx follows a chain of parent references until it finds a // *cancelCtx. This function understands how each of the concrete types in this // package represents its parent. func parentCancelCtx(parent Context) (*cancelCtx, bool) { for { switch c := parent.(type) { case *cancelCtx: return c, true case *timerCtx: return &c.cancelCtx, true case *valueCtx: parent = c.Context default: return nil, false } } } // removeChild removes a context from its parent. func removeChild(parent Context, child canceler) { p, ok := parentCancelCtx(parent) if !ok { return } p.mu.Lock() if p.children != nil { delete(p.children, child) } p.mu.Unlock() } // A canceler is a context type that can be canceled directly. The // implementations are *cancelCtx and *timerCtx. type canceler interface { cancel(removeFromParent bool, err error) Done() <-chan struct{} } // A cancelCtx can be canceled. When canceled, it also cancels any children // that implement canceler. type cancelCtx struct { Context done chan struct{} // closed by the first cancel call. 
mu sync.Mutex children map[canceler]bool // set to nil by the first cancel call err error // set to non-nil by the first cancel call } func (c *cancelCtx) Done() <-chan struct{} { return c.done } func (c *cancelCtx) Err() error { c.mu.Lock() defer c.mu.Unlock() return c.err } func (c *cancelCtx) String() string { return fmt.Sprintf("%v.WithCancel", c.Context) } // cancel closes c.done, cancels each of c's children, and, if // removeFromParent is true, removes c from its parent's children. func (c *cancelCtx) cancel(removeFromParent bool, err error) { if err == nil { panic("context: internal error: missing cancel error") } c.mu.Lock() if c.err != nil { c.mu.Unlock() return // already canceled } c.err = err close(c.done) for child := range c.children { // NOTE: acquiring the child's lock while holding parent's lock. child.cancel(false, err) } c.children = nil c.mu.Unlock() if removeFromParent { removeChild(c.Context, c) } } // WithDeadline returns a copy of the parent context with the deadline adjusted // to be no later than d. If the parent's deadline is already earlier than d, // WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete. func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { // The current deadline is already sooner than the new one. return WithCancel(parent) } c := &timerCtx{ cancelCtx: newCancelCtx(parent), deadline: deadline, } propagateCancel(parent, c) d := deadline.Sub(time.Now()) if d <= 0 { c.cancel(true, DeadlineExceeded) // deadline has already passed return c, func() { c.cancel(true, Canceled) } } c.mu.Lock() defer c.mu.Unlock() if c.err == nil { c.timer = time.AfterFunc(d, func() { c.cancel(true, DeadlineExceeded) }) } return c, func() { c.cancel(true, Canceled) } } // A timerCtx carries a timer and a deadline. It embeds a cancelCtx to // implement Done and Err. It implements cancel by stopping its timer then // delegating to cancelCtx.cancel. type timerCtx struct { cancelCtx timer *time.Timer // Under cancelCtx.mu. deadline time.Time } func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { return c.deadline, true } func (c *timerCtx) String() string { return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) } func (c *timerCtx) cancel(removeFromParent bool, err error) { c.cancelCtx.cancel(false, err) if removeFromParent { // Remove this timerCtx from its parent cancelCtx's children. removeChild(c.cancelCtx.Context, c) } c.mu.Lock() if c.timer != nil { c.timer.Stop() c.timer = nil } c.mu.Unlock() } // WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
// // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // // func slowOperationWithTimeout(ctx context.Context) (Result, error) { // ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) // defer cancel() // releases resources if slowOperation completes before timeout elapses // return slowOperation(ctx) // } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } // WithValue returns a copy of parent in which the value associated with key is // val. // // Use context Values only for request-scoped data that transits processes and // APIs, not for passing optional parameters to functions. func WithValue(parent Context, key interface{}, val interface{}) Context { return &valueCtx{parent, key, val} } // A valueCtx carries a key-value pair. It implements Value for that key and // delegates all other calls to the embedded Context. type valueCtx struct { Context key, val interface{} } func (c *valueCtx) String() string { return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) } func (c *valueCtx) Value(key interface{}) interface{} { if c.key == key { return c.val } return c.Context.Value(key) } ================================================ FILE: vendor/golang.org/x/oauth2/.travis.yml ================================================ language: go go: - 1.3 - 1.4 install: - export GOPATH="$HOME/gopath" - mkdir -p "$GOPATH/src/golang.org/x" - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - go get -v -t -d golang.org/x/oauth2/... script: - go test -v golang.org/x/oauth2/... ================================================ FILE: vendor/golang.org/x/oauth2/AUTHORS ================================================ # This source code refers to The Go Authors for copyright purposes. # The master list of authors is in the main Go distribution, # visible at http://tip.golang.org/AUTHORS. ================================================ FILE: vendor/golang.org/x/oauth2/CONTRIBUTING.md ================================================ # Contributing to Go Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! ## Filing issues When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: 1. What version of Go are you using (`go version`)? 2. What operating system and processor architecture are you using? 3. What did you do? 4. What did you expect to see? 5. What did you see instead? General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. ## Contributing code Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. **We do not accept GitHub pull requests** (we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. ================================================ FILE: vendor/golang.org/x/oauth2/CONTRIBUTORS ================================================ # This source code was written by the Go contributors. # The master list of contributors is in the main Go distribution, # visible at http://tip.golang.org/CONTRIBUTORS. 
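The context package vendored above (vendor/golang.org/x/net/context/context.go) documents the Done/Err cancellation contract and the WithCancel, WithDeadline and WithTimeout helpers. The following is an editorial sketch, not part of the repository, showing one typical use of that API; the 50ms budget and the time.After call standing in for slow work are arbitrary choices:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// Bound the whole operation to 50ms; cancel releases the timer if we finish early.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	select {
	case <-time.After(100 * time.Millisecond): // stand-in for slow work
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println("abandoned:", ctx.Err()) // ctx.Err() is context.DeadlineExceeded
	}
}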
================================================ FILE: vendor/golang.org/x/oauth2/LICENSE ================================================ Copyright (c) 2009 The oauth2 Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/golang.org/x/oauth2/README.md ================================================ # OAuth2 for Go [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) The oauth2 package contains a client implementation of the OAuth 2.0 spec. ## Installation ~~~~ go get golang.org/x/oauth2 ~~~~ See godoc for further documentation and examples. * [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) * [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) ## App Engine In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor of the [`context.Context`](https://golang.org/x/net/context#Context) type from the `golang.org/x/net/context` package. This means it's no longer possible to use the "Classic App Engine" `appengine.Context` type with the `oauth2` package. (You're using Classic App Engine if you import the package `"appengine"`.) To work around this, you may use the new `"google.golang.org/appengine"` package. This package has almost the same API as the `"appengine"` package, but it can be fetched with `go get` and used on "Managed VMs" as well as Classic App Engine. See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) for information on updating your app. If you don't want to update your entire app to use the new App Engine packages, you may use both sets of packages in parallel, using only the new packages with the `oauth2` package.
import ( "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" newappengine "google.golang.org/appengine" newurlfetch "google.golang.org/appengine/urlfetch" "appengine" ) func handler(w http.ResponseWriter, r *http.Request) { var c appengine.Context = appengine.NewContext(r) c.Infof("Logging a message with the old package") var ctx context.Context = newappengine.NewContext(r) client := &http.Client{ Transport: &oauth2.Transport{ Source: google.AppEngineTokenSource(ctx, "scope"), Base: &newurlfetch.Transport{Context: ctx}, }, } client.Get("...") } ================================================ FILE: vendor/golang.org/x/oauth2/client_appengine.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build appengine appenginevm // App Engine hooks. package oauth2 import ( "net/http" "golang.org/x/net/context" "golang.org/x/oauth2/internal" "google.golang.org/appengine/urlfetch" ) func init() { internal.RegisterContextClientFunc(contextClientAppEngine) } func contextClientAppEngine(ctx context.Context) (*http.Client, error) { return urlfetch.Client(ctx), nil } ================================================ FILE: vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package clientcredentials implements the OAuth2.0 "client credentials" token flow, // also known as the "two-legged OAuth 2.0". // // This should be used when the client is acting on its own behalf or when the client // is the resource owner. It may also be used when requesting access to protected // resources based on an authorization previously arranged with the authorization // server. // // See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4 package clientcredentials import ( "net/http" "net/url" "strings" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/internal" ) // tokenFromInternal maps an *internal.Token struct into // an *oauth2.Token struct. func tokenFromInternal(t *internal.Token) *oauth2.Token { if t == nil { return nil } tk := &oauth2.Token{ AccessToken: t.AccessToken, TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, } return tk.WithExtra(t.Raw) } // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is // returned along with an error. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v) if err != nil { return nil, err } return tokenFromInternal(tk), nil } // Client Credentials Config describes a 2-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. type Config struct { // ClientID is the application's ID. ClientID string // ClientSecret is the application's secret. ClientSecret string // TokenURL is the resource server's token endpoint // URL. This is a constant specific to each server. TokenURL string // Scope specifies optional requested permissions. Scopes []string } // Token uses client credentials to retreive a token. // The HTTP client to use is derived from the context. 
// If nil, http.DefaultClient is used. func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { return retrieveToken(ctx, c, url.Values{ "grant_type": {"client_credentials"}, "scope": internal.CondVal(strings.Join(c.Scopes, " ")), }) } // Client returns an HTTP client using the provided token. // The token will auto-refresh as necessary. The underlying // HTTP transport will be obtained using the provided context. // The returned client and its Transport should not be modified. func (c *Config) Client(ctx context.Context) *http.Client { return oauth2.NewClient(ctx, c.TokenSource(ctx)) } // TokenSource returns a TokenSource that returns t until t expires, // automatically refreshing it as necessary using the provided context and the // client ID and client secret. // // Most users will use Config.Client instead. func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { source := &tokenSource{ ctx: ctx, conf: c, } return oauth2.ReuseTokenSource(nil, source) } type tokenSource struct { ctx context.Context conf *Config } // Token refreshes the token by using a new client credentials request. // tokens received this way do not include a refresh token func (c *tokenSource) Token() (*oauth2.Token, error) { return retrieveToken(c.ctx, c.conf, url.Values{ "grant_type": {"client_credentials"}, "scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")), }) } ================================================ FILE: vendor/golang.org/x/oauth2/facebook/facebook.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package facebook provides constants for using OAuth2 to access Facebook. package facebook import ( "golang.org/x/oauth2" ) // Endpoint is Facebook's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://www.facebook.com/dialog/oauth", TokenURL: "https://graph.facebook.com/oauth/access_token", } ================================================ FILE: vendor/golang.org/x/oauth2/github/github.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package github provides constants for using OAuth2 to access Github. package github import ( "golang.org/x/oauth2" ) // Endpoint is Github's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://github.com/login/oauth/authorize", TokenURL: "https://github.com/login/oauth/access_token", } ================================================ FILE: vendor/golang.org/x/oauth2/google/appengine.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package google import ( "sort" "strings" "sync" "time" "golang.org/x/net/context" "golang.org/x/oauth2" ) // Set at init time by appengine_hook.go. If nil, we're not on App Engine. var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) // AppEngineTokenSource returns a token source that fetches tokens // issued to the current App Engine application's service account. // If you are implementing a 3-legged OAuth 2.0 flow on App Engine // that involves user accounts, see oauth2.Config instead. 
// // The provided context must have come from appengine.NewContext. func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { if appengineTokenFunc == nil { panic("google: AppEngineTokenSource can only be used on App Engine.") } scopes := append([]string{}, scope...) sort.Strings(scopes) return &appEngineTokenSource{ ctx: ctx, scopes: scopes, key: strings.Join(scopes, " "), } } // aeTokens helps the fetched tokens to be reused until their expiration. var ( aeTokensMu sync.Mutex aeTokens = make(map[string]*tokenLock) // key is space-separated scopes ) type tokenLock struct { mu sync.Mutex // guards t; held while fetching or updating t t *oauth2.Token } type appEngineTokenSource struct { ctx context.Context scopes []string key string // to aeTokens map; space-separated scopes } func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { if appengineTokenFunc == nil { panic("google: AppEngineTokenSource can only be used on App Engine.") } aeTokensMu.Lock() tok, ok := aeTokens[ts.key] if !ok { tok = &tokenLock{} aeTokens[ts.key] = tok } aeTokensMu.Unlock() tok.mu.Lock() defer tok.mu.Unlock() if tok.t.Valid() { return tok.t, nil } access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) if err != nil { return nil, err } tok.t = &oauth2.Token{ AccessToken: access, Expiry: exp, } return tok.t, nil } ================================================ FILE: vendor/golang.org/x/oauth2/google/appengine_hook.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build appengine appenginevm package google import "google.golang.org/appengine" func init() { appengineTokenFunc = appengine.AccessToken } ================================================ FILE: vendor/golang.org/x/oauth2/google/default.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package google import ( "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "os" "path/filepath" "runtime" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" "google.golang.org/cloud/compute/metadata" ) // DefaultClient returns an HTTP Client that uses the // DefaultTokenSource to obtain authentication credentials. // // This client should be used when developing services // that run on Google App Engine or Google Compute Engine // and use "Application Default Credentials." // // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials // func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { ts, err := DefaultTokenSource(ctx, scope...) if err != nil { return nil, err } return oauth2.NewClient(ctx, ts), nil } // DefaultTokenSource is a token source that uses // "Application Default Credentials". // // It looks for credentials in the following places, // preferring the first location found: // // 1. A JSON file whose path is specified by the // GOOGLE_APPLICATION_CREDENTIALS environment variable. // 2. A JSON file in a location known to the gcloud command-line tool. // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. // 3. On Google App Engine it uses the appengine.AccessToken function. // 4. 
On Google Compute Engine, it fetches credentials from the metadata server. // (In this final case any provided scopes are ignored.) // // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials // func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { // First, try the environment variable. const envVar = "GOOGLE_APPLICATION_CREDENTIALS" if filename := os.Getenv(envVar); filename != "" { ts, err := tokenSourceFromFile(ctx, filename, scope) if err != nil { return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) } return ts, nil } // Second, try a well-known file. filename := wellKnownFile() _, err := os.Stat(filename) if err == nil { ts, err2 := tokenSourceFromFile(ctx, filename, scope) if err2 == nil { return ts, nil } err = err2 } else if os.IsNotExist(err) { err = nil // ignore this error } if err != nil { return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) } // Third, if we're on Google App Engine use those credentials. if appengineTokenFunc != nil { return AppEngineTokenSource(ctx, scope...), nil } // Fourth, if we're on Google Compute Engine use the metadata server. if metadata.OnGCE() { return ComputeTokenSource(""), nil } // None are found; return helpful error. const url = "https://developers.google.com/accounts/docs/application-default-credentials" return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) } func wellKnownFile() string { const f = "application_default_credentials.json" if runtime.GOOS == "windows" { return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) } return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err } var d struct { // Common fields Type string ClientID string `json:"client_id"` // User Credential fields ClientSecret string `json:"client_secret"` RefreshToken string `json:"refresh_token"` // Service Account fields ClientEmail string `json:"client_email"` PrivateKeyID string `json:"private_key_id"` PrivateKey string `json:"private_key"` } if err := json.Unmarshal(b, &d); err != nil { return nil, err } switch d.Type { case "authorized_user": cfg := &oauth2.Config{ ClientID: d.ClientID, ClientSecret: d.ClientSecret, Scopes: append([]string{}, scopes...), // copy Endpoint: Endpoint, } tok := &oauth2.Token{RefreshToken: d.RefreshToken} return cfg.TokenSource(ctx, tok), nil case "service_account": cfg := &jwt.Config{ Email: d.ClientEmail, PrivateKey: []byte(d.PrivateKey), Scopes: append([]string{}, scopes...), // copy TokenURL: JWTTokenURL, } return cfg.TokenSource(ctx), nil case "": return nil, errors.New("missing 'type' field in credentials") default: return nil, fmt.Errorf("unknown credential type: %q", d.Type) } } ================================================ FILE: vendor/golang.org/x/oauth2/google/google.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package google provides support for making OAuth2 authorized and // authenticated HTTP requests to Google APIs. 
// It supports the Web server flow, client-side credentials, service accounts, // Google Compute Engine service accounts, and Google App Engine service // accounts. // // For more information, please read // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. package google import ( "encoding/json" "errors" "fmt" "strings" "time" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" "google.golang.org/cloud/compute/metadata" ) // Endpoint is Google's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://accounts.google.com/o/oauth2/auth", TokenURL: "https://accounts.google.com/o/oauth2/token", } // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" // ConfigFromJSON uses a Google Developers Console client_credentials.json // file to construct a config. // client_credentials.json can be downloadable from https://console.developers.google.com, // under "APIs & Auth" > "Credentials". Download the Web application credentials in the // JSON format and provide the contents of the file as jsonKey. func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { type cred struct { ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` RedirectURIs []string `json:"redirect_uris"` AuthURI string `json:"auth_uri"` TokenURI string `json:"token_uri"` } var j struct { Web *cred `json:"web"` Installed *cred `json:"installed"` } if err := json.Unmarshal(jsonKey, &j); err != nil { return nil, err } var c *cred switch { case j.Web != nil: c = j.Web case j.Installed != nil: c = j.Installed default: return nil, fmt.Errorf("oauth2/google: no credentials found") } if len(c.RedirectURIs) < 1 { return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") } return &oauth2.Config{ ClientID: c.ClientID, ClientSecret: c.ClientSecret, RedirectURL: c.RedirectURIs[0], Scopes: scope, Endpoint: oauth2.Endpoint{ AuthURL: c.AuthURI, TokenURL: c.TokenURI, }, }, nil } // JWTConfigFromJSON uses a Google Developers service account JSON key file to read // the credentials that authorize and authenticate the requests. // Create a service account on "Credentials" page under "APIs & Auth" for your // project at https://console.developers.google.com to download a JSON key file. func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { var key struct { Email string `json:"client_email"` PrivateKey string `json:"private_key"` } if err := json.Unmarshal(jsonKey, &key); err != nil { return nil, err } return &jwt.Config{ Email: key.Email, PrivateKey: []byte(key.PrivateKey), Scopes: scope, TokenURL: JWTTokenURL, }, nil } // ComputeTokenSource returns a token source that fetches access tokens // from Google Compute Engine (GCE)'s metadata server. It's only valid to use // this token source if your program is running on a GCE instance. // If no account is specified, "default" is used. // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. 
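Illustrative aside: ConfigFromJSON above turns downloaded web-application credentials into an oauth2.Config. A hedged sketch of the first step of that flow, where the file name, scope, and state value are all placeholders, not values from this repository:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// "client_secret.json" is a hypothetical path to the credentials file
	// downloaded from the Developers Console, as described above.
	b, err := ioutil.ReadFile("client_secret.json")
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/bigquery")
	if err != nil {
		log.Fatal(err)
	}
	// Send the user to the consent page; "state-token" stands in for a real
	// CSRF token that the redirect handler must verify on return.
	fmt.Println(conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))
}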
func ComputeTokenSource(account string) oauth2.TokenSource { return oauth2.ReuseTokenSource(nil, computeSource{account: account}) } type computeSource struct { account string } func (cs computeSource) Token() (*oauth2.Token, error) { if !metadata.OnGCE() { return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") } acct := cs.account if acct == "" { acct = "default" } tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") if err != nil { return nil, err } var res struct { AccessToken string `json:"access_token"` ExpiresInSec int `json:"expires_in"` TokenType string `json:"token_type"` } err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) if err != nil { return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) } if res.ExpiresInSec == 0 || res.AccessToken == "" { return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") } return &oauth2.Token{ AccessToken: res.AccessToken, TokenType: res.TokenType, Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), }, nil } ================================================ FILE: vendor/golang.org/x/oauth2/google/sdk.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package google import ( "encoding/json" "errors" "fmt" "net/http" "os" "os/user" "path/filepath" "runtime" "strings" "time" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/internal" ) type sdkCredentials struct { Data []struct { Credential struct { ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` TokenExpiry *time.Time `json:"token_expiry"` } `json:"credential"` Key struct { Account string `json:"account"` Scope string `json:"scope"` } `json:"key"` } } // An SDKConfig provides access to tokens from an account already // authorized via the Google Cloud SDK. type SDKConfig struct { conf oauth2.Config initialToken *oauth2.Token } // NewSDKConfig creates an SDKConfig for the given Google Cloud SDK // account. If account is empty, the account currently active in // Google Cloud SDK properties is used. // Google Cloud SDK credentials must be created by running `gcloud auth` // before using this function. // The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
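Illustrative aside: ComputeTokenSource above only works on a GCE instance, since computeSource.Token talks to the metadata server. A minimal sketch, assuming the program runs on GCE:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account name selects the instance's "default" service account.
	// Off GCE, Token() returns an error because the metadata server check fails.
	ts := google.ComputeTokenSource("")
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token expires at:", tok.Expiry)
}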
func NewSDKConfig(account string) (*SDKConfig, error) { configPath, err := sdkConfigPath() if err != nil { return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) } credentialsPath := filepath.Join(configPath, "credentials") f, err := os.Open(credentialsPath) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) } defer f.Close() var c sdkCredentials if err := json.NewDecoder(f).Decode(&c); err != nil { return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) } if len(c.Data) == 0 { return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) } if account == "" { propertiesPath := filepath.Join(configPath, "properties") f, err := os.Open(propertiesPath) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) } defer f.Close() ini, err := internal.ParseINI(f) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) } core, ok := ini["core"] if !ok { return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) } active, ok := core["account"] if !ok { return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) } account = active } for _, d := range c.Data { if account == "" || d.Key.Account == account { if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) } var expiry time.Time if d.Credential.TokenExpiry != nil { expiry = *d.Credential.TokenExpiry } return &SDKConfig{ conf: oauth2.Config{ ClientID: d.Credential.ClientID, ClientSecret: d.Credential.ClientSecret, Scopes: strings.Split(d.Key.Scope, " "), Endpoint: Endpoint, RedirectURL: "oob", }, initialToken: &oauth2.Token{ AccessToken: d.Credential.AccessToken, RefreshToken: d.Credential.RefreshToken, Expiry: expiry, }, }, nil } } return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) } // Client returns an HTTP client using Google Cloud SDK credentials to // authorize requests. The token will auto-refresh as necessary. The // underlying http.RoundTripper will be obtained using the provided // context. The returned client and its Transport should not be // modified. func (c *SDKConfig) Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &oauth2.Transport{ Source: c.TokenSource(ctx), }, } } // TokenSource returns an oauth2.TokenSource that retrieve tokens from // Google Cloud SDK credentials using the provided context. // It will returns the current access token stored in the credentials, // and refresh it when it expires, but it won't update the credentials // with the new access token. func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { return c.conf.TokenSource(ctx, c.initialToken) } // Scopes are the OAuth 2.0 scopes the current account is authorized for. func (c *SDKConfig) Scopes() []string { return c.conf.Scopes } // sdkConfigPath tries to guess where the gcloud config is located. // It can be overridden during tests. 
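Illustrative aside: NewSDKConfig, Client, and Scopes above can be combined to reuse locally stored gcloud credentials. A hedged sketch; the request URL is only an example endpoint, not something this repository calls:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account name picks up whatever account is active in gcloud
	// properties; `gcloud auth login` must have been run beforehand.
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(oauth2.NoContext)
	// Illustrative call; any API the account is authorized for would do.
	resp, err := client.Get("https://www.googleapis.com/oauth2/v2/userinfo")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status, conf.Scopes())
}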
var sdkConfigPath = func() (string, error) { if runtime.GOOS == "windows" { return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil } homeDir := guessUnixHomeDir() if homeDir == "" { return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") } return filepath.Join(homeDir, ".config", "gcloud"), nil } func guessUnixHomeDir() string { usr, err := user.Current() if err == nil { return usr.HomeDir } return os.Getenv("HOME") } ================================================ FILE: vendor/golang.org/x/oauth2/internal/oauth2.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package internal contains support packages for oauth2 package. package internal import ( "bufio" "crypto/rsa" "crypto/x509" "encoding/pem" "errors" "fmt" "io" "strings" ) // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. func ParseKey(key []byte) (*rsa.PrivateKey, error) { block, _ := pem.Decode(key) if block != nil { key = block.Bytes } parsedKey, err := x509.ParsePKCS8PrivateKey(key) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) } } parsed, ok := parsedKey.(*rsa.PrivateKey) if !ok { return nil, errors.New("private key is invalid") } return parsed, nil } func ParseINI(ini io.Reader) (map[string]map[string]string, error) { result := map[string]map[string]string{ "": map[string]string{}, // root section } scanner := bufio.NewScanner(ini) currentSection := "" for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) if strings.HasPrefix(line, ";") { // comment. continue } if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { currentSection = strings.TrimSpace(line[1 : len(line)-1]) result[currentSection] = map[string]string{} continue } parts := strings.SplitN(line, "=", 2) if len(parts) == 2 && parts[0] != "" { result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) } } if err := scanner.Err(); err != nil { return nil, fmt.Errorf("error scanning ini: %v", err) } return result, nil } func CondVal(v string) []string { if v == "" { return nil } return []string{v} } ================================================ FILE: vendor/golang.org/x/oauth2/internal/token.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package internal contains support packages for oauth2 package. package internal import ( "encoding/json" "fmt" "io" "io/ioutil" "mime" "net/http" "net/url" "strconv" "strings" "time" "golang.org/x/net/context" ) // Token represents the crendentials used to authorize // the requests to access protected resources on the OAuth 2.0 // provider's backend. // // This type is a mirror of oauth2.Token and exists to break // an otherwise-circular dependency. Other internal packages // should convert this Token into an oauth2.Token before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. 
AccessToken string // TokenType is the type of token. // The Type method returns either this or "Bearer", the default. TokenType string // RefreshToken is a token that's used by the application // (as opposed to the user) to refresh the access token // if it expires. RefreshToken string // Expiry is the optional expiration time of the access token. // // If zero, TokenSource implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time // Raw optionally contains extra metadata from the server // when updating a token. Raw interface{} } // tokenJSON is the struct representing the HTTP response from OAuth2 // providers returning a token in JSON form. type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in } func (e *tokenJSON) expiry() (t time.Time) { if v := e.ExpiresIn; v != 0 { return time.Now().Add(time.Duration(v) * time.Second) } if v := e.Expires; v != 0 { return time.Now().Add(time.Duration(v) * time.Second) } return } type expirationTime int32 func (e *expirationTime) UnmarshalJSON(b []byte) error { var n json.Number err := json.Unmarshal(b, &n) if err != nil { return err } i, err := n.Int64() if err != nil { return err } *e = expirationTime(i) return nil } var brokenAuthHeaderProviders = []string{ "https://accounts.google.com/", "https://www.googleapis.com/", "https://github.com/", "https://api.instagram.com/", "https://www.douban.com/", "https://api.dropbox.com/", "https://api.soundcloud.com/", "https://www.linkedin.com/", "https://api.twitch.tv/", "https://oauth.vk.com/", "https://api.odnoklassniki.ru/", "https://connect.stripe.com/", "https://api.pushbullet.com/", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", "https://www.strava.com/oauth/", "https://app.box.com/", "https://test-sandbox.auth.corp.google.com", "https://user.gini.net/", } // providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL // implements the OAuth2 spec correctly // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. // In summary: // - Reddit only accepts client secret in the Authorization header // - Dropbox accepts either it in URL param or Auth header, but not both. // - Google only accepts URL param (not spec compliant?), not Auth header // - Stripe only accepts client secret in Auth header with Bearer method, not Basic func providerAuthHeaderWorks(tokenURL string) bool { for _, s := range brokenAuthHeaderProviders { if strings.HasPrefix(tokenURL, s) { // Some sites fail to implement the OAuth2 spec fully. return false } } // Assume the provider implements the spec properly // otherwise. We can add more exceptions as they're // discovered. We will _not_ be adding configurable hooks // to this package to let users select server bugs. 
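Illustrative aside: the expirationTime type above exists because some providers return expires_in as a JSON number while others (PayPal, per the comment) return a string; decoding through json.Number accepts both. A standalone sketch of the same trick under a hypothetical type name, outside this package:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// seconds mirrors expirationTime above: json.Number tolerates both a JSON
// number (3600) and a numeric JSON string ("3600").
type seconds int32

func (s *seconds) UnmarshalJSON(b []byte) error {
	var n json.Number
	if err := json.Unmarshal(b, &n); err != nil {
		return err
	}
	i, err := n.Int64()
	if err != nil {
		return err
	}
	*s = seconds(i)
	return nil
}

func main() {
	for _, body := range []string{`{"expires_in":3600}`, `{"expires_in":"3600"}`} {
		var v struct {
			ExpiresIn seconds `json:"expires_in"`
		}
		if err := json.Unmarshal([]byte(body), &v); err != nil {
			log.Fatal(err)
		}
		fmt.Println(v.ExpiresIn) // prints 3600 in both cases
	}
}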
return true } func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { hc, err := ContextClient(ctx) if err != nil { return nil, err } v.Set("client_id", ClientID) bustedAuth := !providerAuthHeaderWorks(TokenURL) if bustedAuth && ClientSecret != "" { v.Set("client_secret", ClientSecret) } req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") if !bustedAuth { req.SetBasicAuth(ClientID, ClientSecret) } r, err := hc.Do(req) if err != nil { return nil, err } defer r.Body.Close() body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } if code := r.StatusCode; code < 200 || code > 299 { return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": vals, err := url.ParseQuery(string(body)) if err != nil { return nil, err } token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), RefreshToken: vals.Get("refresh_token"), Raw: vals, } e := vals.Get("expires_in") if e == "" { // TODO(jbd): Facebook's OAuth2 implementation is broken and // returns expires_in field in expires. Remove the fallback to expires, // when Facebook fixes their implementation. e = vals.Get("expires") } expires, _ := strconv.Atoi(e) if expires != 0 { token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) } default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { return nil, err } token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), Raw: make(map[string]interface{}), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. if token.RefreshToken == "" { token.RefreshToken = v.Get("refresh_token") } return token, nil } ================================================ FILE: vendor/golang.org/x/oauth2/internal/transport.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package internal contains support packages for oauth2 package. package internal import ( "net/http" "golang.org/x/net/context" ) // HTTPClient is the context key to use with golang.org/x/net/context's // WithValue function to associate an *http.Client value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be // an immutable public variable with a unique type. It's immutable // because nobody else can create a ContextKey, being unexported. type ContextKey struct{} // ContextClientFunc is a func which tries to return an *http.Client // given a Context value. If it returns an error, the search stops // with that error. If it returns (nil, nil), the search continues // down the list of registered funcs. 
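Illustrative aside: RetrieveToken above derives its *http.Client from the context, so callers can control the transport used for the token exchange by attaching a client under the public oauth2.HTTPClient key. A hedged sketch; the 30-second timeout is an arbitrary value for illustration:

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	// A client with a request timeout; the context lookup described above
	// returns it instead of http.DefaultClient for requests made with ctx.
	custom := &http.Client{Timeout: 30 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, custom)

	_ = ctx // pass ctx to Config.Exchange, Config.Client, oauth2.NewClient, etc.
}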
type ContextClientFunc func(context.Context) (*http.Client, error) var contextClientFuncs []ContextClientFunc func RegisterContextClientFunc(fn ContextClientFunc) { contextClientFuncs = append(contextClientFuncs, fn) } func ContextClient(ctx context.Context) (*http.Client, error) { for _, fn := range contextClientFuncs { c, err := fn(ctx) if err != nil { return nil, err } if c != nil { return c, nil } } if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { return hc, nil } return http.DefaultClient, nil } func ContextTransport(ctx context.Context) http.RoundTripper { hc, err := ContextClient(ctx) // This is a rare error case (somebody using nil on App Engine). if err != nil { return ErrorTransport{err} } return hc.Transport } // ErrorTransport returns the specified error on RoundTrip. // This RoundTripper should be used in rare error cases where // error handling can be postponed to response handling time. type ErrorTransport struct{ Err error } func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { return nil, t.Err } ================================================ FILE: vendor/golang.org/x/oauth2/jws/jws.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package jws provides encoding and decoding utilities for // signed JWS messages. package jws import ( "bytes" "crypto" "crypto/rand" "crypto/rsa" "crypto/sha256" "encoding/base64" "encoding/json" "errors" "fmt" "strings" "time" ) // ClaimSet contains information about the JWT signature including the // permissions being requested (scopes), the target of the token, the issuer, // the time the token was issued, and the lifetime of the token. type ClaimSet struct { Iss string `json:"iss"` // email address of the client_id of the application making the access token request Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). Exp int64 `json:"exp"` // the expiration time of the assertion Iat int64 `json:"iat"` // the time the assertion was issued. Typ string `json:"typ,omitempty"` // token type (Optional). // Email for which the application is requesting delegated access (Optional). Sub string `json:"sub,omitempty"` // The old name of Sub. Client keeps setting Prn to be // complaint with legacy OAuth 2.0 providers. (Optional) Prn string `json:"prn,omitempty"` // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 // This array is marshalled using custom code (see (c *ClaimSet) encode()). PrivateClaims map[string]interface{} `json:"-"` exp time.Time iat time.Time } func (c *ClaimSet) encode() (string, error) { if c.exp.IsZero() || c.iat.IsZero() { // Reverting time back for machines whose time is not perfectly in sync. // If client machine's time is in the future according // to Google servers, an access token will not be issued. now := time.Now().Add(-10 * time.Second) c.iat = now c.exp = now.Add(time.Hour) } c.Exp = c.exp.Unix() c.Iat = c.iat.Unix() b, err := json.Marshal(c) if err != nil { return "", err } if len(c.PrivateClaims) == 0 { return base64Encode(b), nil } // Marshal private claim set and then append it to b. 
prv, err := json.Marshal(c.PrivateClaims) if err != nil { return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) } // Concatenate public and private claim JSON objects. if !bytes.HasSuffix(b, []byte{'}'}) { return "", fmt.Errorf("jws: invalid JSON %s", b) } if !bytes.HasPrefix(prv, []byte{'{'}) { return "", fmt.Errorf("jws: invalid JSON %s", prv) } b[len(b)-1] = ',' // Replace closing curly brace with a comma. b = append(b, prv[1:]...) // Append private claims. return base64Encode(b), nil } // Header represents the header for the signed JWS payloads. type Header struct { // The algorithm used for signature. Algorithm string `json:"alg"` // Represents the token type. Typ string `json:"typ"` } func (h *Header) encode() (string, error) { b, err := json.Marshal(h) if err != nil { return "", err } return base64Encode(b), nil } // Decode decodes a claim set from a JWS payload. func Decode(payload string) (*ClaimSet, error) { // decode returned id token to get expiry s := strings.Split(payload, ".") if len(s) < 2 { // TODO(jbd): Provide more context about the error. return nil, errors.New("jws: invalid token received") } decoded, err := base64Decode(s[1]) if err != nil { return nil, err } c := &ClaimSet{} err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) return c, err } // Encode encodes a signed JWS with provided header and claim set. func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) { head, err := header.encode() if err != nil { return "", err } cs, err := c.encode() if err != nil { return "", err } ss := fmt.Sprintf("%s.%s", head, cs) h := sha256.New() h.Write([]byte(ss)) b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil)) if err != nil { return "", err } sig := base64Encode(b) return fmt.Sprintf("%s.%s", ss, sig), nil } // base64Encode returns and Base64url encoded version of the input string with any // trailing "=" stripped. func base64Encode(b []byte) string { return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } // base64Decode decodes the Base64url encoded string func base64Decode(s string) ([]byte, error) { // add back missing padding switch len(s) % 4 { case 2: s += "==" case 3: s += "=" } return base64.URLEncoding.DecodeString(s) } ================================================ FILE: vendor/golang.org/x/oauth2/jwt/jwt.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly // known as "two-legged OAuth 2.0". // // See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 package jwt import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "strings" "time" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/internal" "golang.org/x/oauth2/jws" ) var ( defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} ) // Config is the configuration for using JWT to fetch tokens, // commonly known as "two-legged OAuth 2.0". type Config struct { // Email is the OAuth client identifier used when communicating with // the configured OAuth provider. Email string // PrivateKey contains the contents of an RSA private key or the // contents of a PEM file that contains a private key. The provided // private key is used to sign JWT payloads. 
// PEM containers with a passphrase are not supported. // Use the following command to convert a PKCS 12 file into a PEM. // // $ openssl pkcs12 -in key.p12 -out key.pem -nodes // PrivateKey []byte // Subject is the optional user to impersonate. Subject string // Scopes optionally specifies a list of requested permission scopes. Scopes []string // TokenURL is the endpoint required to complete the 2-legged JWT flow. TokenURL string } // TokenSource returns a JWT TokenSource using the configuration // in c and the HTTP client from the provided context. func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) } // Client returns an HTTP client wrapping the context's // HTTP transport and adding Authorization headers with tokens // obtained from c. // // The returned client and its Transport should not be modified. func (c *Config) Client(ctx context.Context) *http.Client { return oauth2.NewClient(ctx, c.TokenSource(ctx)) } // jwtSource is a source that always does a signed JWT request for a token. // It should typically be wrapped with a reuseTokenSource. type jwtSource struct { ctx context.Context conf *Config } func (js jwtSource) Token() (*oauth2.Token, error) { pk, err := internal.ParseKey(js.conf.PrivateKey) if err != nil { return nil, err } hc := oauth2.NewClient(js.ctx, nil) claimSet := &jws.ClaimSet{ Iss: js.conf.Email, Scope: strings.Join(js.conf.Scopes, " "), Aud: js.conf.TokenURL, } if subject := js.conf.Subject; subject != "" { claimSet.Sub = subject // prn is the old name of sub. Keep setting it // to be compatible with legacy OAuth 2.0 providers. claimSet.Prn = subject } payload, err := jws.Encode(defaultHeader, claimSet, pk) if err != nil { return nil, err } v := url.Values{} v.Set("grant_type", defaultGrantType) v.Set("assertion", payload) resp, err := hc.PostForm(js.conf.TokenURL, v) if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } defer resp.Body.Close() body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) } // tokenRes is the JSON response body. var tokenRes struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` IDToken string `json:"id_token"` ExpiresIn int64 `json:"expires_in"` // relative seconds from now } if err := json.Unmarshal(body, &tokenRes); err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } token := &oauth2.Token{ AccessToken: tokenRes.AccessToken, TokenType: tokenRes.TokenType, } raw := make(map[string]interface{}) json.Unmarshal(body, &raw) // no error checks for optional fields token = token.WithExtra(raw) if secs := tokenRes.ExpiresIn; secs > 0 { token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) } if v := tokenRes.IDToken; v != "" { // decode returned id token to get expiry claimSet, err := jws.Decode(v) if err != nil { return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) } token.Expiry = time.Unix(claimSet.Exp, 0) } return token, nil } ================================================ FILE: vendor/golang.org/x/oauth2/linkedin/linkedin.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
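Illustrative aside: JWTConfigFromJSON (earlier, in google.go) is the usual way to build the jwt.Config shown above, but the struct can also be filled in directly. A hedged sketch where the key file path, service-account email, and scope are placeholders:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"golang.org/x/oauth2/jwt"
)

func main() {
	// Placeholder key material; a real key is generated in the Developers Console.
	pem, err := ioutil.ReadFile("service-account.pem")
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:      "svc-account@example-project.iam.gserviceaccount.com", // placeholder
		PrivateKey: pem,
		Scopes:     []string{"https://www.googleapis.com/auth/bigquery"},
		TokenURL:   google.JWTTokenURL,
	}
	// The two-legged flow: sign a JWT with the private key, post it to the
	// token URL, and receive an access token back.
	tok, err := conf.TokenSource(oauth2.NoContext).Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", tok.Type())
}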
// Package linkedin provides constants for using OAuth2 to access LinkedIn. package linkedin import ( "golang.org/x/oauth2" ) // Endpoint is LinkedIn's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", } ================================================ FILE: vendor/golang.org/x/oauth2/oauth2.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package oauth2 provides support for making // OAuth2 authorized and authenticated HTTP requests. // It can additionally grant authorization with Bearer JWT. package oauth2 import ( "bytes" "errors" "net/http" "net/url" "strings" "sync" "golang.org/x/net/context" "golang.org/x/oauth2/internal" ) // NoContext is the default context you should supply if not using // your own context.Context (see https://golang.org/x/net/context). var NoContext = context.TODO() // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. type Config struct { // ClientID is the application's ID. ClientID string // ClientSecret is the application's secret. ClientSecret string // Endpoint contains the resource server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. Endpoint Endpoint // RedirectURL is the URL to redirect users going through // the OAuth flow, after the resource owner's URLs. RedirectURL string // Scope specifies optional requested permissions. Scopes []string } // A TokenSource is anything that can return a token. type TokenSource interface { // Token returns a token or an error. // Token must be safe for concurrent use by multiple goroutines. // The returned Token must not be modified. Token() (*Token, error) } // Endpoint contains the OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { AuthURL string TokenURL string } var ( // AccessTypeOnline and AccessTypeOffline are options passed // to the Options.AuthCodeURL method. They modify the // "access_type" field that gets sent in the URL returned by // AuthCodeURL. // // Online is the default if neither is specified. If your // application needs to refresh access tokens when the user // is not present at the browser, then use offline. This will // result in your application obtaining a refresh token the // first time your application exchanges an authorization // code for a user. AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") // ApprovalForce forces the users to view the consent dialog // and confirm the permissions request at the URL returned // from AuthCodeURL, even if they've already done so. ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") ) // An AuthCodeOption is passed to Config.AuthCodeURL. type AuthCodeOption interface { setValue(url.Values) } type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } // SetAuthURLParam builds an AuthCodeOption which passes key/value parameters // to a provider's authorization endpoint. 
func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} } // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // // State is a token to protect the user from CSRF attacks. You must // always provide a non-zero string and validate that it matches the // the state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) v := url.Values{ "response_type": {"code"}, "client_id": {c.ClientID}, "redirect_uri": internal.CondVal(c.RedirectURL), "scope": internal.CondVal(strings.Join(c.Scopes, " ")), "state": internal.CondVal(state), } for _, opt := range opts { opt.setValue(v) } if strings.Contains(c.Endpoint.AuthURL, "?") { buf.WriteByte('&') } else { buf.WriteByte('?') } buf.WriteString(v.Encode()) return buf.String() } // PasswordCredentialsToken converts a resource owner username and password // pair into a token. // // Per the RFC, this grant type should only be used "when there is a high // degree of trust between the resource owner and the client (e.g., the client // is part of the device operating system or a highly privileged application), // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // // The HTTP client to use is derived from the context. // If nil, http.DefaultClient is used. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { return retrieveToken(ctx, c, url.Values{ "grant_type": {"password"}, "username": {username}, "password": {password}, "scope": internal.CondVal(strings.Join(c.Scopes, " ")), }) } // Exchange converts an authorization code into a token. // // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // // The HTTP client to use is derived from the context. // If a client is not provided via the context, http.DefaultClient is used. // // The code will be in the *http.Request.FormValue("code"). Before // calling Exchange, be sure to validate FormValue("state"). func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { return retrieveToken(ctx, c, url.Values{ "grant_type": {"authorization_code"}, "code": {code}, "redirect_uri": internal.CondVal(c.RedirectURL), "scope": internal.CondVal(strings.Join(c.Scopes, " ")), }) } // Client returns an HTTP client using the provided token. // The token will auto-refresh as necessary. The underlying // HTTP transport will be obtained using the provided context. // The returned client and its Transport should not be modified. func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } // TokenSource returns a TokenSource that returns t until t expires, // automatically refreshing it as necessary using the provided context. // // Most users will use Config.Client instead. 
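Illustrative aside: AuthCodeURL, Exchange, and Client above make up the usual three-legged flow. A hedged sketch in which the client ID/secret, endpoint URLs, scope, and the way the authorization code reaches the program (stdin here) are all placeholders:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		RedirectURL:  "http://localhost:8080/callback",
		Scopes:       []string{"profile"}, // provider-specific
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/o/oauth2/auth",
			TokenURL: "https://provider.example.com/o/oauth2/token",
		},
	}

	// 1. Send the user to the consent page (the redirect handler must
	//    validate the returned state value).
	fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

	// 2. Read the authorization code; stdin is used purely for illustration.
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		log.Fatal(err)
	}

	// 3. Exchange the code for a token and obtain a self-refreshing client.
	tok, err := conf.Exchange(oauth2.NoContext, code)
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(oauth2.NoContext, tok)
	_ = client // use like any *http.Client; Authorization headers are added automatically
}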
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, conf: c, } if t != nil { tkr.refreshToken = t.RefreshToken } return &reuseTokenSource{ t: t, new: tkr, } } // tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" // HTTP requests to renew a token using a RefreshToken. type tokenRefresher struct { ctx context.Context // used to get HTTP requests conf *Config refreshToken string } // WARNING: Token is not safe for concurrent access, as it // updates the tokenRefresher's refreshToken field. // Within this package, it is used by reuseTokenSource which // synchronizes calls to this method with its own mutex. func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken == "" { return nil, errors.New("oauth2: token expired and refresh token is not set") } tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ "grant_type": {"refresh_token"}, "refresh_token": {tf.refreshToken}, }) if err != nil { return nil, err } if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } return tk, err } // reuseTokenSource is a TokenSource that holds a single token in memory // and validates its expiry before each call to retrieve it with // Token. If it's expired, it will be auto-refreshed using the // new TokenSource. type reuseTokenSource struct { new TokenSource // called when t is expired. mu sync.Mutex // guards t t *Token } // Token returns the current token if it's still valid, else will // refresh the current token (using r.Context for HTTP client // information) and return the new one. func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() if s.t.Valid() { return s.t, nil } t, err := s.new.Token() if err != nil { return nil, err } s.t = t return t, nil } // StaticTokenSource returns a TokenSource that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { return staticTokenSource{t} } // staticTokenSource is a TokenSource that always returns the same Token. type staticTokenSource struct { t *Token } func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } // HTTPClient is the context key to use with golang.org/x/net/context's // WithValue function to associate an *http.Client value with a context. var HTTPClient internal.ContextKey // NewClient creates an *http.Client from a Context and TokenSource. // The returned client is not valid beyond the lifetime of the context. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. This exists to support related OAuth2 // packages. func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { c, err := internal.ContextClient(ctx) if err != nil { return &http.Client{Transport: internal.ErrorTransport{err}} } return c } return &http.Client{ Transport: &Transport{ Base: internal.ContextTransport(ctx), Source: ReuseTokenSource(nil, src), }, } } // ReuseTokenSource returns a TokenSource which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // // ReuseTokenSource is typically used to reuse tokens from a cache // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. 
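Illustrative aside: StaticTokenSource combined with NewClient, both above, is the usual pattern for pre-issued, non-expiring tokens such as personal access tokens. A hedged sketch; the token string and request URL are placeholders:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	// A pre-issued token that never expires; StaticTokenSource never refreshes it.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "PERSONAL_ACCESS_TOKEN"})
	client := oauth2.NewClient(oauth2.NoContext, ts)

	// Placeholder request; the Transport inside client adds the
	// "Authorization: Bearer ..." header on every call.
	resp, err := client.Get("https://api.example.com/user")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}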
// // The initial token t may be nil, in which case the TokenSource is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other // TokenSource without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. That would work, // but cause an unnecessary number of mutex operations. // Just build the equivalent one. if rt, ok := src.(*reuseTokenSource); ok { if t == nil { // Just use it directly. return rt } src = rt.new } return &reuseTokenSource{ t: t, new: src, } } ================================================ FILE: vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. package odnoklassniki import ( "golang.org/x/oauth2" ) // Endpoint is Odnoklassniki's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://www.odnoklassniki.ru/oauth/authorize", TokenURL: "https://api.odnoklassniki.ru/oauth/token.do", } ================================================ FILE: vendor/golang.org/x/oauth2/paypal/paypal.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package paypal provides constants for using OAuth2 to access PayPal. package paypal import ( "golang.org/x/oauth2" ) // Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment. var Endpoint = oauth2.Endpoint{ AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice", } // SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment. var SandboxEndpoint = oauth2.Endpoint{ AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice", } ================================================ FILE: vendor/golang.org/x/oauth2/token.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package oauth2 import ( "net/http" "net/url" "strings" "time" "golang.org/x/net/context" "golang.org/x/oauth2/internal" ) // expiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. const expiryDelta = 10 * time.Second // Token represents the crendentials used to authorize // the requests to access protected resources on the OAuth 2.0 // provider's backend. // // Most users of this package should not access fields of Token // directly. They're exported mostly for use by related packages // implementing derivative OAuth2 flows. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. AccessToken string `json:"access_token"` // TokenType is the type of token. // The Type method returns either this or "Bearer", the default. 
TokenType string `json:"token_type,omitempty"` // RefreshToken is a token that's used by the application // (as opposed to the user) to refresh the access token // if it expires. RefreshToken string `json:"refresh_token,omitempty"` // Expiry is the optional expiration time of the access token. // // If zero, TokenSource implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` // raw optionally contains extra metadata from the server // when updating a token. raw interface{} } // Type returns t.TokenType if non-empty, else "Bearer". func (t *Token) Type() string { if strings.EqualFold(t.TokenType, "bearer") { return "Bearer" } if strings.EqualFold(t.TokenType, "mac") { return "MAC" } if strings.EqualFold(t.TokenType, "basic") { return "Basic" } if t.TokenType != "" { return t.TokenType } return "Bearer" } // SetAuthHeader sets the Authorization header to r using the access // token in t. // // This method is unnecessary when using Transport or an HTTP Client // returned by this package. func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } // WithExtra returns a new Token that's a clone of t, but using the // provided raw extra map. This is only intended for use by packages // implementing derivative OAuth2 flows. func (t *Token) WithExtra(extra interface{}) *Token { t2 := new(Token) *t2 = *t t2.raw = extra return t2 } // Extra returns an extra field. // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. func (t *Token) Extra(key string) interface{} { if vals, ok := t.raw.(url.Values); ok { // TODO(jbd): Cast numeric values to int64 or float64. return vals.Get(key) } if raw, ok := t.raw.(map[string]interface{}); ok { return raw[key] } return nil } // expired reports whether the token is expired. // t must be non-nil. func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } return t.Expiry.Add(-expiryDelta).Before(time.Now()) } // Valid reports whether t is non-nil, has an AccessToken, and is not expired. func (t *Token) Valid() bool { return t != nil && t.AccessToken != "" && !t.expired() } // tokenFromInternal maps an *internal.Token struct into // a *Token struct. func tokenFromInternal(t *internal.Token) *Token { if t == nil { return nil } return &Token{ AccessToken: t.AccessToken, TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, raw: t.Raw, } } // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) if err != nil { return nil, err } return tokenFromInternal(tk), nil } ================================================ FILE: vendor/golang.org/x/oauth2/transport.go ================================================ // Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package oauth2 import ( "errors" "io" "net/http" "sync" ) // Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, // wrapping a base RoundTripper and adding an Authorization header // with a token from the supplied Sources. 
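Illustrative aside: the Token helpers above (Type, Valid, SetAuthHeader, WithExtra, Extra) can also be used without Transport, for example when building requests by hand. A short hedged sketch with placeholder token and URL values:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{
		AccessToken: "ACCESS_TOKEN", // placeholder
		TokenType:   "bearer",
		Expiry:      time.Now().Add(time.Hour),
	}
	fmt.Println(tok.Valid(), tok.Type()) // true Bearer

	// SetAuthHeader attaches "Authorization: Bearer ACCESS_TOKEN" manually.
	req, err := http.NewRequest("GET", "https://api.example.com/resource", nil)
	if err != nil {
		log.Fatal(err)
	}
	tok.SetAuthHeader(req)

	// Extra exposes provider-specific fields kept alongside the token.
	tok = tok.WithExtra(url.Values{"scope": {"profile email"}})
	fmt.Println(tok.Extra("scope"))
}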
// // Transport is a low-level mechanism. Most code will use the // higher-level Config.Client method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. Source TokenSource // Base is the base RoundTripper used to make HTTP requests. // If nil, http.DefaultTransport is used. Base http.RoundTripper mu sync.Mutex // guards modReq modReq map[*http.Request]*http.Request // original -> modified } // RoundTrip authorizes and authenticates the request with an // access token. If no token exists or token is expired, // tries to refresh/fetch a new token. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { if t.Source == nil { return nil, errors.New("oauth2: Transport's Source is nil") } token, err := t.Source.Token() if err != nil { return nil, err } req2 := cloneRequest(req) // per RoundTripper contract token.SetAuthHeader(req2) t.setModReq(req, req2) res, err := t.base().RoundTrip(req2) if err != nil { t.setModReq(req, nil) return nil, err } res.Body = &onEOFReader{ rc: res.Body, fn: func() { t.setModReq(req, nil) }, } return res, nil } // CancelRequest cancels an in-flight request by closing its connection. func (t *Transport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := t.base().(canceler); ok { t.mu.Lock() modReq := t.modReq[req] delete(t.modReq, req) t.mu.Unlock() cr.CancelRequest(modReq) } } func (t *Transport) base() http.RoundTripper { if t.Base != nil { return t.Base } return http.DefaultTransport } func (t *Transport) setModReq(orig, mod *http.Request) { t.mu.Lock() defer t.mu.Unlock() if t.modReq == nil { t.modReq = make(map[*http.Request]*http.Request) } if mod == nil { delete(t.modReq, orig) } else { t.modReq[orig] = mod } } // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { // shallow copy of the struct r2 := new(http.Request) *r2 = *r // deep copy of the Header r2.Header = make(http.Header, len(r.Header)) for k, s := range r.Header { r2.Header[k] = append([]string(nil), s...) } return r2 } type onEOFReader struct { rc io.ReadCloser fn func() } func (r *onEOFReader) Read(p []byte) (n int, err error) { n, err = r.rc.Read(p) if err == io.EOF { r.runFunc() } return } func (r *onEOFReader) Close() error { err := r.rc.Close() r.runFunc() return err } func (r *onEOFReader) runFunc() { if fn := r.fn; fn != nil { fn() r.fn = nil } } ================================================ FILE: vendor/golang.org/x/oauth2/vk/vk.go ================================================ // Copyright 2015 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package vk provides constants for using OAuth2 to access VK.com. package vk import ( "golang.org/x/oauth2" ) // Endpoint is VK's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://oauth.vk.com/authorize", TokenURL: "https://oauth.vk.com/access_token", } ================================================ FILE: vendor/google.golang.org/api/LICENSE ================================================ Copyright (c) 2011 Google Inc. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/google.golang.org/api/bigquery/v2/bigquery-api.json ================================================ { "kind": "discovery#restDescription", "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/n2LVhGPabQO3DmbKxkomJprJEEo\"", "discoveryVersion": "v1", "id": "bigquery:v2", "name": "bigquery", "version": "v2", "revision": "20141112", "title": "BigQuery API", "description": "A data platform for customers to create, manage, share and query data.", "ownerDomain": "google.com", "ownerName": "Google", "icons": { "x16": "https://www.google.com/images/icons/product/search-16.gif", "x32": "https://www.google.com/images/icons/product/search-32.gif" }, "documentationLink": "https://cloud.google.com/bigquery/", "protocol": "rest", "baseUrl": "https://www.googleapis.com/bigquery/v2/", "basePath": "/bigquery/v2/", "rootUrl": "https://www.googleapis.com/", "servicePath": "bigquery/v2/", "batchPath": "batch", "parameters": { "alt": { "type": "string", "description": "Data format for the response.", "default": "json", "enum": [ "csv", "json" ], "enumDescriptions": [ "Responses with Content-Type of text/csv", "Responses with Content-Type of application/json" ], "location": "query" }, "fields": { "type": "string", "description": "Selector specifying which fields to include in a partial response.", "location": "query" }, "key": { "type": "string", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "location": "query" }, "oauth_token": { "type": "string", "description": "OAuth 2.0 token for the current user.", "location": "query" }, "prettyPrint": { "type": "boolean", "description": "Returns response with indentations and line breaks.", "default": "true", "location": "query" }, "quotaUser": { "type": "string", "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. 
Overrides userIp if both are provided.", "location": "query" }, "userIp": { "type": "string", "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", "location": "query" } }, "auth": { "oauth2": { "scopes": { "https://www.googleapis.com/auth/bigquery": { "description": "View and manage your data in Google BigQuery" }, "https://www.googleapis.com/auth/bigquery.insertdata": { "description": "Insert data into Google BigQuery" }, "https://www.googleapis.com/auth/cloud-platform": { "description": "View and manage your data across Google Cloud Platform services" }, "https://www.googleapis.com/auth/devstorage.full_control": { "description": "Manage your data and permissions in Google Cloud Storage" }, "https://www.googleapis.com/auth/devstorage.read_only": { "description": "View your data in Google Cloud Storage" }, "https://www.googleapis.com/auth/devstorage.read_write": { "description": "Manage your data in Google Cloud Storage" } } } }, "schemas": { "CsvOptions": { "id": "CsvOptions", "type": "object", "properties": { "allowJaggedRows": { "type": "boolean", "description": "[Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false." }, "allowQuotedNewlines": { "type": "boolean", "description": "[Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false." }, "encoding": { "type": "string", "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties." }, "fieldDelimiter": { "type": "string", "description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')." }, "quote": { "type": "string", "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true." }, "skipLeadingRows": { "type": "integer", "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.", "format": "int32" } } }, "Dataset": { "id": "Dataset", "type": "object", "properties": { "access": { "type": "array", "description": "[Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. 
If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;", "items": { "type": "object", "properties": { "domain": { "type": "string", "description": "[Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: \"example.com\"." }, "groupByEmail": { "type": "string", "description": "[Pick one] An email address of a Google Group to grant access to." }, "role": { "type": "string", "description": "[Required] Describes the rights granted to the user specified by the other member of the access object. The following string values are supported: READER, WRITER, OWNER." }, "specialGroup": { "type": "string", "description": "[Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. allAuthenticatedUsers: All authenticated BigQuery users." }, "userByEmail": { "type": "string", "description": "[Pick one] An email address of a user to grant access to. For example: fred@example.com." }, "view": { "$ref": "TableReference", "description": "[Pick one] A view from a different dataset to grant access to. Queries executed against that view will have read access to tables in this dataset. The role field is not required when this field is set. If that view is updated by any user, access to the view needs to be granted again via an update operation." } } } }, "creationTime": { "type": "string", "description": "[Output-only] The time when this dataset was created, in milliseconds since the epoch.", "format": "int64" }, "datasetReference": { "$ref": "DatasetReference", "description": "[Required] A reference that identifies the dataset." }, "defaultTableExpirationMs": { "type": "string", "description": "[Experimental] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.", "format": "int64" }, "description": { "type": "string", "description": "[Optional] A user-friendly description of the dataset." }, "etag": { "type": "string", "description": "[Output-only] A hash of the resource." }, "friendlyName": { "type": "string", "description": "[Optional] A descriptive name for the dataset." }, "id": { "type": "string", "description": "[Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field." 
}, "kind": { "type": "string", "description": "[Output-only] The resource type.", "default": "bigquery#dataset" }, "lastModifiedTime": { "type": "string", "description": "[Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.", "format": "int64" }, "location": { "type": "string", "description": "[Experimental] The location where the data resides. If not present, the data will be stored in the US." }, "selfLink": { "type": "string", "description": "[Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource." } } }, "DatasetList": { "id": "DatasetList", "type": "object", "properties": { "datasets": { "type": "array", "description": "An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. This property is omitted when there are no datasets in the project.", "items": { "type": "object", "properties": { "datasetReference": { "$ref": "DatasetReference", "description": "The dataset reference. Use this property to access specific parts of the dataset's ID, such as project ID or dataset ID." }, "friendlyName": { "type": "string", "description": "A descriptive name for the dataset, if one exists." }, "id": { "type": "string", "description": "The fully-qualified, unique, opaque ID of the dataset." }, "kind": { "type": "string", "description": "The resource type. This property always returns the value \"bigquery#dataset\".", "default": "bigquery#dataset" } } } }, "etag": { "type": "string", "description": "A hash value of the results page. You can use this property to determine if the page has changed since the last request." }, "kind": { "type": "string", "description": "The list type. This property always returns the value \"bigquery#datasetList\".", "default": "bigquery#datasetList" }, "nextPageToken": { "type": "string", "description": "A token that can be used to request the next results page. This property is omitted on the final results page." } } }, "DatasetReference": { "id": "DatasetReference", "type": "object", "properties": { "datasetId": { "type": "string", "description": "[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.", "annotations": { "required": [ "bigquery.datasets.update" ] } }, "projectId": { "type": "string", "description": "[Optional] The ID of the project containing this dataset.", "annotations": { "required": [ "bigquery.datasets.update" ] } } } }, "ErrorProto": { "id": "ErrorProto", "type": "object", "properties": { "debugInfo": { "type": "string", "description": "Debugging information. This property is internal to Google and should not be used." }, "location": { "type": "string", "description": "Specifies where the error occurred, if present." }, "message": { "type": "string", "description": "A human-readable description of the error." }, "reason": { "type": "string", "description": "A short error code that summarizes the error." } } }, "ExternalDataConfiguration": { "id": "ExternalDataConfiguration", "type": "object", "properties": { "compression": { "type": "string", "description": "[Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE." 
}, "csvOptions": { "$ref": "CsvOptions", "description": "Additional properties to set if sourceFormat is set to CSV." }, "ignoreUnknownValues": { "type": "boolean", "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns" }, "maxBadRecords": { "type": "integer", "description": "[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.", "format": "int32" }, "schema": { "$ref": "TableSchema", "description": "[Required] The schema for the data." }, "sourceFormat": { "type": "string", "description": "[Optional] The data format. External data sources must be in CSV format. The default value is CSV." }, "sourceUris": { "type": "array", "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. CSV limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.", "items": { "type": "string" } } } }, "GetQueryResultsResponse": { "id": "GetQueryResultsResponse", "type": "object", "properties": { "cacheHit": { "type": "boolean", "description": "Whether the query result was fetched from the query cache." }, "etag": { "type": "string", "description": "A hash of this response." }, "jobComplete": { "type": "boolean", "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available." }, "jobReference": { "$ref": "JobReference", "description": "Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)." }, "kind": { "type": "string", "description": "The resource type of the response.", "default": "bigquery#getQueryResultsResponse" }, "pageToken": { "type": "string", "description": "A token used for paging results." }, "rows": { "type": "array", "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.", "items": { "$ref": "TableRow" } }, "schema": { "$ref": "TableSchema", "description": "The schema of the results. Present only when the query completes successfully." }, "totalBytesProcessed": { "type": "string", "description": "The total number of bytes processed for this query.", "format": "int64" }, "totalRows": { "type": "string", "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. 
Present only when the query completes successfully.", "format": "uint64" } } }, "Job": { "id": "Job", "type": "object", "properties": { "configuration": { "$ref": "JobConfiguration", "description": "[Required] Describes the job configuration." }, "etag": { "type": "string", "description": "[Output-only] A hash of this resource." }, "id": { "type": "string", "description": "[Output-only] Opaque ID field of the job" }, "jobReference": { "$ref": "JobReference", "description": "[Optional] Reference describing the unique-per-user name of the job." }, "kind": { "type": "string", "description": "[Output-only] The type of the resource.", "default": "bigquery#job" }, "selfLink": { "type": "string", "description": "[Output-only] A URL that can be used to access this resource again." }, "statistics": { "$ref": "JobStatistics", "description": "[Output-only] Information about the job, including starting time and ending time of the job." }, "status": { "$ref": "JobStatus", "description": "[Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete." }, "user_email": { "type": "string", "description": "[Output-only] Email address of the user who ran the job." } } }, "JobConfiguration": { "id": "JobConfiguration", "type": "object", "properties": { "copy": { "$ref": "JobConfigurationTableCopy", "description": "[Pick one] Copies a table." }, "dryRun": { "type": "boolean", "description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run. Behavior of non-query jobs is undefined." }, "extract": { "$ref": "JobConfigurationExtract", "description": "[Pick one] Configures an extract job." }, "link": { "$ref": "JobConfigurationLink", "description": "[Pick one] Configures a link job." }, "load": { "$ref": "JobConfigurationLoad", "description": "[Pick one] Configures a load job." }, "query": { "$ref": "JobConfigurationQuery", "description": "[Pick one] Configures a query job." } } }, "JobConfigurationExtract": { "id": "JobConfigurationExtract", "type": "object", "properties": { "compression": { "type": "string", "description": "[Optional] The compression type to use for exported files. Possible values include GZIP and NONE. The default value is NONE." }, "destinationFormat": { "type": "string", "description": "[Optional] The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. Tables with nested or repeated fields cannot be exported as CSV." }, "destinationUri": { "type": "string", "description": "[Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as necessary. The fully-qualified Google Cloud Storage URI where the extracted table should be written." }, "destinationUris": { "type": "array", "description": "[Pick one] A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.", "items": { "type": "string" } }, "fieldDelimiter": { "type": "string", "description": "[Optional] Delimiter to use between fields in the exported data. Default is ','" }, "printHeader": { "type": "boolean", "description": "[Optional] Whether to print out a header row in the results. Default is true." }, "sourceTable": { "$ref": "TableReference", "description": "[Required] A reference to the table being exported." 
} } }, "JobConfigurationLink": { "id": "JobConfigurationLink", "type": "object", "properties": { "createDisposition": { "type": "string", "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion." }, "destinationTable": { "$ref": "TableReference", "description": "[Required] The destination table of the link job." }, "sourceUri": { "type": "array", "description": "[Required] URI of source table to link.", "items": { "type": "string" } }, "writeDisposition": { "type": "string", "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion." } } }, "JobConfigurationLoad": { "id": "JobConfigurationLoad", "type": "object", "properties": { "allowJaggedRows": { "type": "boolean", "description": "[Optional] Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats." }, "allowQuotedNewlines": { "type": "boolean", "description": "Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false." }, "createDisposition": { "type": "string", "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion." }, "destinationTable": { "$ref": "TableReference", "description": "[Required] The destination table to load the data into." }, "encoding": { "type": "string", "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties." }, "fieldDelimiter": { "type": "string", "description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')." 
}, "ignoreUnknownValues": { "type": "boolean", "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names" }, "maxBadRecords": { "type": "integer", "description": "[Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.", "format": "int32" }, "projectionFields": { "type": "array", "description": "[Experimental] If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.", "items": { "type": "string" } }, "quote": { "type": "string", "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true." }, "schema": { "$ref": "TableSchema", "description": "[Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data." }, "schemaInline": { "type": "string", "description": "[Deprecated] The inline schema. For CSV schemas, specify as \"Field1:Type1[,Field2:Type2]*\". For example, \"foo:STRING, bar:INTEGER, baz:FLOAT\"." }, "schemaInlineFormat": { "type": "string", "description": "[Deprecated] The format of the schemaInline property." }, "skipLeadingRows": { "type": "integer", "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.", "format": "int32" }, "sourceFormat": { "type": "string", "description": "[Optional] The format of the data files. For CSV files, specify \"CSV\". For datastore backups, specify \"DATASTORE_BACKUP\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". The default value is CSV." }, "sourceUris": { "type": "array", "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.", "items": { "type": "string" } }, "writeDisposition": { "type": "string", "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. 
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion." } } }, "JobConfigurationQuery": { "id": "JobConfigurationQuery", "type": "object", "properties": { "allowLargeResults": { "type": "boolean", "description": "If true, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set." }, "createDisposition": { "type": "string", "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion." }, "defaultDataset": { "$ref": "DatasetReference", "description": "[Optional] Specifies the default dataset to use for unqualified table names in the query." }, "destinationTable": { "$ref": "TableReference", "description": "[Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results." }, "flattenResults": { "type": "boolean", "description": "[Optional] Flattens all nested and repeated fields in the query results. The default value is true. allowLargeResults must be true if this is set to false." }, "preserveNulls": { "type": "boolean", "description": "[Deprecated] This property is deprecated." }, "priority": { "type": "string", "description": "[Optional] Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE." }, "query": { "type": "string", "description": "[Required] BigQuery SQL query to execute." }, "tableDefinitions": { "type": "object", "description": "[Experimental] If querying an external data source outside of BigQuery, describes the data format, location and other properties of the data source. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.", "additionalProperties": { "$ref": "ExternalDataConfiguration" } }, "useQueryCache": { "type": "boolean", "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified." }, "writeDisposition": { "type": "string", "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion." 
} } }, "JobConfigurationTableCopy": { "id": "JobConfigurationTableCopy", "type": "object", "properties": { "createDisposition": { "type": "string", "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion." }, "destinationTable": { "$ref": "TableReference", "description": "[Required] The destination table" }, "sourceTable": { "$ref": "TableReference", "description": "[Pick one] Source table to copy." }, "sourceTables": { "type": "array", "description": "[Pick one] Source tables to copy.", "items": { "$ref": "TableReference" } }, "writeDisposition": { "type": "string", "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion." } } }, "JobList": { "id": "JobList", "type": "object", "properties": { "etag": { "type": "string", "description": "A hash of this page of results." }, "jobs": { "type": "array", "description": "List of jobs that were requested.", "items": { "type": "object", "properties": { "configuration": { "$ref": "JobConfiguration", "description": "[Full-projection-only] Specifies the job configuration." }, "errorResult": { "$ref": "ErrorProto", "description": "A result object that will be present only if the job has failed." }, "id": { "type": "string", "description": "Unique opaque ID of the job." }, "jobReference": { "$ref": "JobReference", "description": "Job reference uniquely identifying the job." }, "kind": { "type": "string", "description": "The resource type.", "default": "bigquery#job" }, "state": { "type": "string", "description": "Running state of the job. When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed." }, "statistics": { "$ref": "JobStatistics", "description": "[Output-only] Information about the job, including starting time and ending time of the job." }, "status": { "$ref": "JobStatus", "description": "[Full-projection-only] Describes the state of the job." }, "user_email": { "type": "string", "description": "[Full-projection-only] Email address of the user who ran the job." } } } }, "kind": { "type": "string", "description": "The resource type of the response.", "default": "bigquery#jobList" }, "nextPageToken": { "type": "string", "description": "A token to request the next page of results." }, "totalItems": { "type": "integer", "description": "Total number of jobs in this collection.", "format": "int32" } } }, "JobReference": { "id": "JobReference", "type": "object", "properties": { "jobId": { "type": "string", "description": "[Required] The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). 
The maximum length is 1,024 characters.", "annotations": { "required": [ "bigquery.jobs.getQueryResults" ] } }, "projectId": { "type": "string", "description": "[Required] The ID of the project containing this job.", "annotations": { "required": [ "bigquery.jobs.getQueryResults" ] } } } }, "JobStatistics": { "id": "JobStatistics", "type": "object", "properties": { "creationTime": { "type": "string", "description": "[Output-only] Creation time of this job, in milliseconds since the epoch. This field will be present on all jobs.", "format": "int64" }, "endTime": { "type": "string", "description": "[Output-only] End time of this job, in milliseconds since the epoch. This field will be present whenever a job is in the DONE state.", "format": "int64" }, "extract": { "$ref": "JobStatistics4", "description": "[Output-only] Statistics for an extract job." }, "load": { "$ref": "JobStatistics3", "description": "[Output-only] Statistics for a load job." }, "query": { "$ref": "JobStatistics2", "description": "[Output-only] Statistics for a query job." }, "startTime": { "type": "string", "description": "[Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE.", "format": "int64" }, "totalBytesProcessed": { "type": "string", "description": "[Output-only] [Deprecated] Use the bytes processed in the query statistics instead.", "format": "int64" } } }, "JobStatistics2": { "id": "JobStatistics2", "type": "object", "properties": { "cacheHit": { "type": "boolean", "description": "[Output-only] Whether the query result was fetched from the query cache." }, "totalBytesProcessed": { "type": "string", "description": "[Output-only] Total bytes processed for this job.", "format": "int64" } } }, "JobStatistics3": { "id": "JobStatistics3", "type": "object", "properties": { "inputFileBytes": { "type": "string", "description": "[Output-only] Number of bytes of source data in a load job.", "format": "int64" }, "inputFiles": { "type": "string", "description": "[Output-only] Number of source files in a load job.", "format": "int64" }, "outputBytes": { "type": "string", "description": "[Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.", "format": "int64" }, "outputRows": { "type": "string", "description": "[Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.", "format": "int64" } } }, "JobStatistics4": { "id": "JobStatistics4", "type": "object", "properties": { "destinationUriFileCounts": { "type": "array", "description": "[Experimental] Number of files per destination URI or URI pattern specified in the extract configuration. These values will be in the same order as the URIs specified in the 'destinationUris' field.", "items": { "type": "string", "format": "int64" } } } }, "JobStatus": { "id": "JobStatus", "type": "object", "properties": { "errorResult": { "$ref": "ErrorProto", "description": "[Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful." }, "errors": { "type": "array", "description": "[Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.", "items": { "$ref": "ErrorProto" } }, "state": { "type": "string", "description": "[Output-only] Running state of the job." 
} } }, "JsonObject": { "id": "JsonObject", "type": "object", "description": "Represents a single JSON object.", "additionalProperties": { "$ref": "JsonValue" } }, "JsonValue": { "id": "JsonValue", "type": "any" }, "ProjectList": { "id": "ProjectList", "type": "object", "properties": { "etag": { "type": "string", "description": "A hash of the page of results" }, "kind": { "type": "string", "description": "The type of list.", "default": "bigquery#projectList" }, "nextPageToken": { "type": "string", "description": "A token to request the next page of results." }, "projects": { "type": "array", "description": "Projects to which you have at least READ access.", "items": { "type": "object", "properties": { "friendlyName": { "type": "string", "description": "A descriptive name for this project." }, "id": { "type": "string", "description": "An opaque ID of this project." }, "kind": { "type": "string", "description": "The resource type.", "default": "bigquery#project" }, "numericId": { "type": "string", "description": "The numeric ID of this project.", "format": "uint64" }, "projectReference": { "$ref": "ProjectReference", "description": "A unique reference to this project." } } } }, "totalItems": { "type": "integer", "description": "The total number of projects in the list.", "format": "int32" } } }, "ProjectReference": { "id": "ProjectReference", "type": "object", "properties": { "projectId": { "type": "string", "description": "[Required] ID of the project. Can be either the numeric ID or the assigned ID of the project." } } }, "QueryRequest": { "id": "QueryRequest", "type": "object", "properties": { "defaultDataset": { "$ref": "DatasetReference", "description": "[Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'." }, "dryRun": { "type": "boolean", "description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run." }, "kind": { "type": "string", "description": "The resource type of the request.", "default": "bigquery#queryRequest" }, "maxResults": { "type": "integer", "description": "[Optional] The maximum number of rows of data to return per page of results. Setting this flag to a small value such as 1000 and then paging through results might improve reliability when the query result set is large. In addition to this limit, responses are also limited to 10 MB. By default, there is no maximum row count, and only the byte limit applies.", "format": "uint32" }, "preserveNulls": { "type": "boolean", "description": "[Deprecated] This property is deprecated." }, "query": { "type": "string", "description": "[Required] A query string, following the BigQuery query syntax, of the query to execute. Example: \"SELECT count(f1) FROM [myProjectId:myDatasetId.myTableId]\".", "annotations": { "required": [ "bigquery.jobs.query" ] } }, "timeoutMs": { "type": "integer", "description": "[Optional] How long to wait for the query to complete, in milliseconds, before the request times out and returns. Note that this is only a timeout for the request, not the query. If the query takes longer to run than the timeout value, the call returns without any results and with the 'jobComplete' flag set to false. You can call GetQueryResults() to wait for the query to complete and read the results. 
The default value is 10000 milliseconds (10 seconds).", "format": "uint32" }, "useQueryCache": { "type": "boolean", "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. The default value is true." } } }, "QueryResponse": { "id": "QueryResponse", "type": "object", "properties": { "cacheHit": { "type": "boolean", "description": "Whether the query result was fetched from the query cache." }, "jobComplete": { "type": "boolean", "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available." }, "jobReference": { "$ref": "JobReference", "description": "Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)." }, "kind": { "type": "string", "description": "The resource type.", "default": "bigquery#queryResponse" }, "pageToken": { "type": "string", "description": "A token used for paging results." }, "rows": { "type": "array", "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above.", "items": { "$ref": "TableRow" } }, "schema": { "$ref": "TableSchema", "description": "The schema of the results. Present only when the query completes successfully." }, "totalBytesProcessed": { "type": "string", "description": "The total number of bytes processed for this query. If this query was a dry run, this is the number of bytes that would be processed if the query were run.", "format": "int64" }, "totalRows": { "type": "string", "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results.", "format": "uint64" } } }, "Table": { "id": "Table", "type": "object", "properties": { "creationTime": { "type": "string", "description": "[Output-only] The time when this table was created, in milliseconds since the epoch.", "format": "int64" }, "description": { "type": "string", "description": "[Optional] A user-friendly description of this table." }, "etag": { "type": "string", "description": "[Output-only] A hash of this resource." }, "expirationTime": { "type": "string", "description": "[Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.", "format": "int64" }, "friendlyName": { "type": "string", "description": "[Optional] A descriptive name for this table." }, "id": { "type": "string", "description": "[Output-only] An opaque ID uniquely identifying the table." }, "kind": { "type": "string", "description": "[Output-only] The type of the resource.", "default": "bigquery#table" }, "lastModifiedTime": { "type": "string", "description": "[Output-only] The time when this table was last modified, in milliseconds since the epoch.", "format": "uint64" }, "numBytes": { "type": "string", "description": "[Output-only] The size of the table in bytes. 
This property is unavailable for tables that are actively receiving streaming inserts.", "format": "int64" }, "numRows": { "type": "string", "description": "[Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.", "format": "uint64" }, "schema": { "$ref": "TableSchema", "description": "[Optional] Describes the schema of this table." }, "selfLink": { "type": "string", "description": "[Output-only] A URL that can be used to access this resource again." }, "tableReference": { "$ref": "TableReference", "description": "[Required] Reference describing the ID of this table." }, "type": { "type": "string", "description": "[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. The default value is TABLE." }, "view": { "$ref": "ViewDefinition", "description": "[Optional] The view definition." } } }, "TableCell": { "id": "TableCell", "type": "object", "description": "Represents a single cell in the result set. Users of the java client can detect whether their value result is null by calling 'com.google.api.client.util.Data.isNull(cell.getV())'.", "properties": { "v": { "type": "any" } } }, "TableDataInsertAllRequest": { "id": "TableDataInsertAllRequest", "type": "object", "properties": { "ignoreUnknownValues": { "type": "boolean", "description": "[Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. Default is false, which treats unknown values as errors." }, "kind": { "type": "string", "description": "The resource type of the response.", "default": "bigquery#tableDataInsertAllRequest" }, "rows": { "type": "array", "description": "The rows to insert.", "items": { "type": "object", "properties": { "insertId": { "type": "string", "description": "[Optional] A unique ID for each row. BigQuery uses this property to detect duplicate insertion requests on a best-effort basis." }, "json": { "$ref": "JsonObject", "description": "[Required] A JSON object that contains a row of data. The object's properties and values must match the destination table's schema." } } } }, "skipInvalidRows": { "type": "boolean", "description": "[Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist." } } }, "TableDataInsertAllResponse": { "id": "TableDataInsertAllResponse", "type": "object", "properties": { "insertErrors": { "type": "array", "description": "An array of errors for rows that were not inserted.", "items": { "type": "object", "properties": { "errors": { "type": "array", "description": "Error information for the row indicated by the index property.", "items": { "$ref": "ErrorProto" } }, "index": { "type": "integer", "description": "The index of the row that error applies to.", "format": "uint32" } } } }, "kind": { "type": "string", "description": "The resource type of the response.", "default": "bigquery#tableDataInsertAllResponse" } } }, "TableDataList": { "id": "TableDataList", "type": "object", "properties": { "etag": { "type": "string", "description": "A hash of this page of results." }, "kind": { "type": "string", "description": "The resource type of the response.", "default": "bigquery#tableDataList" }, "pageToken": { "type": "string", "description": "A token used for paging results. 
Providing this token instead of the startIndex parameter can help you retrieve stable results when an underlying table is changing." }, "rows": { "type": "array", "description": "Rows of results.", "items": { "$ref": "TableRow" } }, "totalRows": { "type": "string", "description": "The total number of rows in the complete table.", "format": "int64" } } }, "TableFieldSchema": { "id": "TableFieldSchema", "type": "object", "properties": { "description": { "type": "string", "description": "[Optional] The field description. The maximum length is 16K characters." }, "fields": { "type": "array", "description": "[Optional] Describes the nested schema fields if the type property is set to RECORD.", "items": { "$ref": "TableFieldSchema" } }, "mode": { "type": "string", "description": "[Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE." }, "name": { "type": "string", "description": "[Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters." }, "type": { "type": "string", "description": "[Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema)." } } }, "TableList": { "id": "TableList", "type": "object", "properties": { "etag": { "type": "string", "description": "A hash of this page of results." }, "kind": { "type": "string", "description": "The type of list.", "default": "bigquery#tableList" }, "nextPageToken": { "type": "string", "description": "A token to request the next page of results." }, "tables": { "type": "array", "description": "Tables in the requested dataset.", "items": { "type": "object", "properties": { "friendlyName": { "type": "string", "description": "The user-friendly name for this table." }, "id": { "type": "string", "description": "An opaque ID of the table" }, "kind": { "type": "string", "description": "The resource type.", "default": "bigquery#table" }, "tableReference": { "$ref": "TableReference", "description": "A reference uniquely identifying the table." }, "type": { "type": "string", "description": "The type of table. Possible values are: TABLE, VIEW." } } } }, "totalItems": { "type": "integer", "description": "The total number of tables in the dataset.", "format": "int32" } } }, "TableReference": { "id": "TableReference", "type": "object", "properties": { "datasetId": { "type": "string", "description": "[Required] The ID of the dataset containing this table.", "annotations": { "required": [ "bigquery.tables.update" ] } }, "projectId": { "type": "string", "description": "[Required] The ID of the project containing this table.", "annotations": { "required": [ "bigquery.tables.update" ] } }, "tableId": { "type": "string", "description": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). 
The maximum length is 1,024 characters.", "annotations": { "required": [ "bigquery.tables.update" ] } } } }, "TableRow": { "id": "TableRow", "type": "object", "description": "Represents a single row in the result set, consisting of one or more fields.", "properties": { "f": { "type": "array", "items": { "$ref": "TableCell" } } } }, "TableSchema": { "id": "TableSchema", "type": "object", "properties": { "fields": { "type": "array", "description": "Describes the fields in a table.", "items": { "$ref": "TableFieldSchema" } } } }, "ViewDefinition": { "id": "ViewDefinition", "type": "object", "properties": { "query": { "type": "string", "description": "[Required] A query that BigQuery executes when the view is referenced." } } } }, "resources": { "datasets": { "methods": { "delete": { "id": "bigquery.datasets.delete", "path": "projects/{projectId}/datasets/{datasetId}", "httpMethod": "DELETE", "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of dataset being deleted", "required": true, "location": "path" }, "deleteContents": { "type": "boolean", "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False", "location": "query" }, "projectId": { "type": "string", "description": "Project ID of the dataset being deleted", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId" ], "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "get": { "id": "bigquery.datasets.get", "path": "projects/{projectId}/datasets/{datasetId}", "httpMethod": "GET", "description": "Returns the dataset specified by datasetID.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the requested dataset", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the requested dataset", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId" ], "response": { "$ref": "Dataset" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "insert": { "id": "bigquery.datasets.insert", "path": "projects/{projectId}/datasets", "httpMethod": "POST", "description": "Creates a new empty dataset.", "parameters": { "projectId": { "type": "string", "description": "Project ID of the new dataset", "required": true, "location": "path" } }, "parameterOrder": [ "projectId" ], "request": { "$ref": "Dataset" }, "response": { "$ref": "Dataset" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { "id": "bigquery.datasets.list", "path": "projects/{projectId}/datasets", "httpMethod": "GET", "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.", "parameters": { "all": { "type": "boolean", "description": "Whether to list all datasets, including hidden ones", "location": "query" }, "maxResults": { "type": "integer", "description": "The maximum number of results to return", "format": "uint32", "location": "query" }, "pageToken": { "type": "string", "description": "Page token, returned by a previous call, to 
request the next page of results", "location": "query" }, "projectId": { "type": "string", "description": "Project ID of the datasets to be listed", "required": true, "location": "path" } }, "parameterOrder": [ "projectId" ], "response": { "$ref": "DatasetList" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "patch": { "id": "bigquery.datasets.patch", "path": "projects/{projectId}/datasets/{datasetId}", "httpMethod": "PATCH", "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the dataset being updated", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the dataset being updated", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId" ], "request": { "$ref": "Dataset" }, "response": { "$ref": "Dataset" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "update": { "id": "bigquery.datasets.update", "path": "projects/{projectId}/datasets/{datasetId}", "httpMethod": "PUT", "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the dataset being updated", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the dataset being updated", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId" ], "request": { "$ref": "Dataset" }, "response": { "$ref": "Dataset" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] } } }, "jobs": { "methods": { "get": { "id": "bigquery.jobs.get", "path": "projects/{projectId}/jobs/{jobId}", "httpMethod": "GET", "description": "Returns information about a specific job. Job information is available for a six month period after creation. 
Requires that you're the person who ran the job, or have the Is Owner project role.", "parameters": { "jobId": { "type": "string", "description": "Job ID of the requested job", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the requested job", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "jobId" ], "response": { "$ref": "Job" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "getQueryResults": { "id": "bigquery.jobs.getQueryResults", "path": "projects/{projectId}/queries/{jobId}", "httpMethod": "GET", "description": "Retrieves the results of a query job.", "parameters": { "jobId": { "type": "string", "description": "Job ID of the query job", "required": true, "location": "path" }, "maxResults": { "type": "integer", "description": "Maximum number of results to read", "format": "uint32", "location": "query" }, "pageToken": { "type": "string", "description": "Page token, returned by a previous call, to request the next page of results", "location": "query" }, "projectId": { "type": "string", "description": "Project ID of the query job", "required": true, "location": "path" }, "startIndex": { "type": "string", "description": "Zero-based index of the starting row", "format": "uint64", "location": "query" }, "timeoutMs": { "type": "integer", "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error", "format": "uint32", "location": "query" } }, "parameterOrder": [ "projectId", "jobId" ], "response": { "$ref": "GetQueryResultsResponse" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "insert": { "id": "bigquery.jobs.insert", "path": "projects/{projectId}/jobs", "httpMethod": "POST", "description": "Starts a new asynchronous job. Requires the Can View project role.", "parameters": { "projectId": { "type": "string", "description": "Project ID of the project that will be billed for the job", "required": true, "location": "path" } }, "parameterOrder": [ "projectId" ], "request": { "$ref": "Job" }, "response": { "$ref": "Job" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsMediaUpload": true, "mediaUpload": { "accept": [ "*/*" ], "protocols": { "simple": { "multipart": true, "path": "/upload/bigquery/v2/projects/{projectId}/jobs" }, "resumable": { "multipart": true, "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs" } } } }, "list": { "id": "bigquery.jobs.list", "path": "projects/{projectId}/jobs", "httpMethod": "GET", "description": "Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.", "parameters": { "allUsers": { "type": "boolean", "description": "Whether to display jobs owned by all users in the project. 
Default false", "location": "query" }, "maxResults": { "type": "integer", "description": "Maximum number of results to return", "format": "uint32", "location": "query" }, "pageToken": { "type": "string", "description": "Page token, returned by a previous call, to request the next page of results", "location": "query" }, "projectId": { "type": "string", "description": "Project ID of the jobs to list", "required": true, "location": "path" }, "projection": { "type": "string", "description": "Restrict information returned to a set of selected fields", "enum": [ "full", "minimal" ], "enumDescriptions": [ "Includes all job data", "Does not include the job configuration" ], "location": "query" }, "stateFilter": { "type": "string", "description": "Filter for job state", "enum": [ "done", "pending", "running" ], "enumDescriptions": [ "Finished jobs", "Pending jobs", "Running jobs" ], "repeated": true, "location": "query" } }, "parameterOrder": [ "projectId" ], "response": { "$ref": "JobList" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "query": { "id": "bigquery.jobs.query", "path": "projects/{projectId}/queries", "httpMethod": "POST", "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.", "parameters": { "projectId": { "type": "string", "description": "Project ID of the project billed for the query", "required": true, "location": "path" } }, "parameterOrder": [ "projectId" ], "request": { "$ref": "QueryRequest" }, "response": { "$ref": "QueryResponse" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] } } }, "projects": { "methods": { "list": { "id": "bigquery.projects.list", "path": "projects", "httpMethod": "GET", "description": "Lists all projects to which you have been granted any project role.", "parameters": { "maxResults": { "type": "integer", "description": "Maximum number of results to return", "format": "uint32", "location": "query" }, "pageToken": { "type": "string", "description": "Page token, returned by a previous call, to request the next page of results", "location": "query" } }, "response": { "$ref": "ProjectList" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] } } }, "tabledata": { "methods": { "insertAll": { "id": "bigquery.tabledata.insertAll", "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll", "httpMethod": "POST", "description": "Streams data into BigQuery one record at a time without needing to run a load job. 
Requires the WRITER dataset role.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the destination table.", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the destination table.", "required": true, "location": "path" }, "tableId": { "type": "string", "description": "Table ID of the destination table.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId", "tableId" ], "request": { "$ref": "TableDataInsertAllRequest" }, "response": { "$ref": "TableDataInsertAllResponse" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/bigquery.insertdata", "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { "id": "bigquery.tabledata.list", "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data", "httpMethod": "GET", "description": "Retrieves table data from a specified set of rows. Requires the READER dataset role.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the table to read", "required": true, "location": "path" }, "maxResults": { "type": "integer", "description": "Maximum number of results to return", "format": "uint32", "location": "query" }, "pageToken": { "type": "string", "description": "Page token, returned by a previous call, identifying the result set", "location": "query" }, "projectId": { "type": "string", "description": "Project ID of the table to read", "required": true, "location": "path" }, "startIndex": { "type": "string", "description": "Zero-based index of the starting row to read", "format": "uint64", "location": "query" }, "tableId": { "type": "string", "description": "Table ID of the table to read", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId", "tableId" ], "response": { "$ref": "TableDataList" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] } } }, "tables": { "methods": { "delete": { "id": "bigquery.tables.delete", "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", "httpMethod": "DELETE", "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the table to delete", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the table to delete", "required": true, "location": "path" }, "tableId": { "type": "string", "description": "Table ID of the table to delete", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId", "tableId" ], "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "get": { "id": "bigquery.tables.get", "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", "httpMethod": "GET", "description": "Gets the specified table resource by table ID. 
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the requested table", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the requested table", "required": true, "location": "path" }, "tableId": { "type": "string", "description": "Table ID of the requested table", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId", "tableId" ], "response": { "$ref": "Table" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "insert": { "id": "bigquery.tables.insert", "path": "projects/{projectId}/datasets/{datasetId}/tables", "httpMethod": "POST", "description": "Creates a new, empty table in the dataset.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the new table", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the new table", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId" ], "request": { "$ref": "Table" }, "response": { "$ref": "Table" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { "id": "bigquery.tables.list", "path": "projects/{projectId}/datasets/{datasetId}/tables", "httpMethod": "GET", "description": "Lists all tables in the specified dataset. Requires the READER dataset role.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the tables to list", "required": true, "location": "path" }, "maxResults": { "type": "integer", "description": "Maximum number of results to return", "format": "uint32", "location": "query" }, "pageToken": { "type": "string", "description": "Page token, returned by a previous call, to request the next page of results", "location": "query" }, "projectId": { "type": "string", "description": "Project ID of the tables to list", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId" ], "response": { "$ref": "TableList" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "patch": { "id": "bigquery.tables.patch", "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", "httpMethod": "PATCH", "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the table to update", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the table to update", "required": true, "location": "path" }, "tableId": { "type": "string", "description": "Table ID of the table to update", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId", "tableId" ], "request": { "$ref": "Table" }, "response": { "$ref": "Table" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, "update": { "id": "bigquery.tables.update", "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", "httpMethod": "PUT", "description": "Updates information in an existing table. 
The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.", "parameters": { "datasetId": { "type": "string", "description": "Dataset ID of the table to update", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "Project ID of the table to update", "required": true, "location": "path" }, "tableId": { "type": "string", "description": "Table ID of the table to update", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "datasetId", "tableId" ], "request": { "$ref": "Table" }, "response": { "$ref": "Table" }, "scopes": [ "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] } } } } } ================================================ FILE: vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go ================================================ // Package bigquery provides access to the BigQuery API. // // See https://cloud.google.com/bigquery/ // // Usage example: // // import "google.golang.org/api/bigquery/v2" // ... // bigqueryService, err := bigquery.New(oauthHttpClient) package bigquery import ( "bytes" "encoding/json" "errors" "fmt" "golang.org/x/net/context" "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Background const apiId = "bigquery:v2" const apiName = "bigquery" const apiVersion = "v2" const basePath = "https://www.googleapis.com/bigquery/v2/" // OAuth2 scopes used by this API. 
const ( // View and manage your data in Google BigQuery BigqueryScope = "https://www.googleapis.com/auth/bigquery" // Insert data into Google BigQuery BigqueryInsertdataScope = "https://www.googleapis.com/auth/bigquery.insertdata" // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // Manage your data and permissions in Google Cloud Storage DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" // View your data in Google Cloud Storage DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" // Manage your data in Google Cloud Storage DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" ) func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Datasets = NewDatasetsService(s) s.Jobs = NewJobsService(s) s.Projects = NewProjectsService(s) s.Tabledata = NewTabledataService(s) s.Tables = NewTablesService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Datasets *DatasetsService Jobs *JobsService Projects *ProjectsService Tabledata *TabledataService Tables *TablesService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewDatasetsService(s *Service) *DatasetsService { rs := &DatasetsService{s: s} return rs } type DatasetsService struct { s *Service } func NewJobsService(s *Service) *JobsService { rs := &JobsService{s: s} return rs } type JobsService struct { s *Service } func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} return rs } type ProjectsService struct { s *Service } func NewTabledataService(s *Service) *TabledataService { rs := &TabledataService{s: s} return rs } type TabledataService struct { s *Service } func NewTablesService(s *Service) *TablesService { rs := &TablesService{s: s} return rs } type TablesService struct { s *Service } type CsvOptions struct { // AllowJaggedRows: [Optional] Indicates if BigQuery should accept rows // that are missing trailing optional columns. If true, BigQuery treats // missing trailing columns as null values. If false, records with // missing trailing columns are treated as bad records, and if there are // too many bad records, an invalid error is returned in the job result. // The default value is false. AllowJaggedRows bool `json:"allowJaggedRows,omitempty"` // AllowQuotedNewlines: [Optional] Indicates if BigQuery should allow // quoted data sections that contain newline characters in a CSV file. // The default value is false. AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"` // Encoding: [Optional] The character encoding of the data. The // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. // BigQuery decodes the data after the raw, binary data has been split // using the values of the quote and fieldDelimiter properties. Encoding string `json:"encoding,omitempty"` // FieldDelimiter: [Optional] The separator for fields in a CSV file. // BigQuery converts the string to ISO-8859-1 encoding, and then uses // the first byte of the encoded string to split the data in its raw, // binary state. BigQuery also supports the escape sequence "\t" to // specify a tab separator. The default value is a comma (','). 
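A minimal, illustrative sketch (not part of the generated file; the helper name and User-Agent string are hypothetical): the generated client is built from an already-authorized *http.Client, and New wires up the per-collection services shown above. How the client gets its OAuth2 credentials for one of the scopes listed in the const block is outside this snippet.

// newBigQueryClient is an illustrative helper, not part of the generated
// file. It assumes authedClient already attaches credentials for one of
// the scopes above (for example BigqueryScope).
func newBigQueryClient(authedClient *http.Client) (*Service, error) {
	svc, err := New(authedClient)
	if err != nil {
		return nil, err
	}
	// Optional: identify the caller in the User-Agent header.
	svc.UserAgent = "example-app/0.1"
	// svc.Datasets, svc.Jobs, svc.Projects, svc.Tabledata and svc.Tables
	// are all initialized by New and ready to use.
	return svc, nil
}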
FieldDelimiter string `json:"fieldDelimiter,omitempty"` // Quote: [Optional] The value that is used to quote data sections in a // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and // then uses the first byte of the encoded string to split the data in // its raw, binary state. The default value is a double-quote ('"'). If // your data does not contain quoted sections, set the property value to // an empty string. If your data contains quoted newline characters, you // must also set the allowQuotedNewlines property to true. Quote string `json:"quote,omitempty"` // SkipLeadingRows: [Optional] The number of rows at the top of a CSV // file that BigQuery will skip when reading the data. The default value // is 0. This property is useful if you have header rows in the file // that should be skipped. SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"` } type Dataset struct { // Access: [Optional] An array of objects that define dataset access for // one or more entities. You can set this property when inserting or // updating a dataset in order to control who is allowed to access the // data. If unspecified at dataset creation time, BigQuery adds default // dataset access for the following entities: access.specialGroup: // projectReaders; access.role: READER; access.specialGroup: // projectWriters; access.role: WRITER; access.specialGroup: // projectOwners; access.role: OWNER; access.userByEmail: [dataset // creator email]; access.role: OWNER; Access []*DatasetAccess `json:"access,omitempty"` // CreationTime: [Output-only] The time when this dataset was created, // in milliseconds since the epoch. CreationTime int64 `json:"creationTime,omitempty,string"` // DatasetReference: [Required] A reference that identifies the dataset. DatasetReference *DatasetReference `json:"datasetReference,omitempty"` // DefaultTableExpirationMs: [Experimental] The default lifetime of all // tables in the dataset, in milliseconds. The minimum value is 3600000 // milliseconds (one hour). Once this property is set, all newly-created // tables in the dataset will have an expirationTime property set to the // creation time plus the value in this property, and changing the value // will only affect new tables, not existing ones. When the // expirationTime for a given table is reached, that table will be // deleted automatically. If a table's expirationTime is modified or // removed before the table expires, or if you provide an explicit // expirationTime when creating a table, that value takes precedence // over the default expiration time indicated by this property. DefaultTableExpirationMs int64 `json:"defaultTableExpirationMs,omitempty,string"` // Description: [Optional] A user-friendly description of the dataset. Description string `json:"description,omitempty"` // Etag: [Output-only] A hash of the resource. Etag string `json:"etag,omitempty"` // FriendlyName: [Optional] A descriptive name for the dataset. FriendlyName string `json:"friendlyName,omitempty"` // Id: [Output-only] The fully-qualified unique name of the dataset in // the format projectId:datasetId. The dataset name without the project // name is given in the datasetId field. When creating a new dataset, // leave this field blank, and instead specify the datasetId field. Id string `json:"id,omitempty"` // Kind: [Output-only] The resource type. Kind string `json:"kind,omitempty"` // LastModifiedTime: [Output-only] The date when this dataset or any of // its tables was last modified, in milliseconds since the epoch. 
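As a quick illustration of the CsvOptions defaults described above, here is a hypothetical literal for a pipe-delimited file with one header row; any field left at its zero value keeps the documented server-side default.

// pipeDelimitedCsv is an illustrative CsvOptions value (not part of the
// generated file).
var pipeDelimitedCsv = &CsvOptions{
	FieldDelimiter:  "|",
	SkipLeadingRows: 1,
	// AllowJaggedRows and AllowQuotedNewlines default to false;
	// Encoding defaults to UTF-8; Quote defaults to '"'.
}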
LastModifiedTime int64 `json:"lastModifiedTime,omitempty,string"` // Location: [Experimental] The location where the data resides. If not // present, the data will be stored in the US. Location string `json:"location,omitempty"` // SelfLink: [Output-only] A URL that can be used to access the resource // again. You can use this URL in Get or Update requests to the // resource. SelfLink string `json:"selfLink,omitempty"` } type DatasetAccess struct { // Domain: [Pick one] A domain to grant access to. Any users signed in // with the domain specified will be granted the specified access. // Example: "example.com". Domain string `json:"domain,omitempty"` // GroupByEmail: [Pick one] An email address of a Google Group to grant // access to. GroupByEmail string `json:"groupByEmail,omitempty"` // Role: [Required] Describes the rights granted to the user specified // by the other member of the access object. The following string values // are supported: READER, WRITER, OWNER. Role string `json:"role,omitempty"` // SpecialGroup: [Pick one] A special group to grant access to. Possible // values include: projectOwners: Owners of the enclosing project. // projectReaders: Readers of the enclosing project. projectWriters: // Writers of the enclosing project. allAuthenticatedUsers: All // authenticated BigQuery users. SpecialGroup string `json:"specialGroup,omitempty"` // UserByEmail: [Pick one] An email address of a user to grant access // to. For example: fred@example.com. UserByEmail string `json:"userByEmail,omitempty"` // View: [Pick one] A view from a different dataset to grant access to. // Queries executed against that view will have read access to tables in // this dataset. The role field is not required when this field is set. // If that view is updated by any user, access to the view needs to be // granted again via an update operation. View *TableReference `json:"view,omitempty"` } type DatasetList struct { // Datasets: An array of the dataset resources in the project. Each // resource contains basic information. For full information about a // particular dataset resource, use the Datasets: get method. This // property is omitted when there are no datasets in the project. Datasets []*DatasetListDatasets `json:"datasets,omitempty"` // Etag: A hash value of the results page. You can use this property to // determine if the page has changed since the last request. Etag string `json:"etag,omitempty"` // Kind: The list type. This property always returns the value // "bigquery#datasetList". Kind string `json:"kind,omitempty"` // NextPageToken: A token that can be used to request the next results // page. This property is omitted on the final results page. NextPageToken string `json:"nextPageToken,omitempty"` } type DatasetListDatasets struct { // DatasetReference: The dataset reference. Use this property to access // specific parts of the dataset's ID, such as project ID or dataset ID. DatasetReference *DatasetReference `json:"datasetReference,omitempty"` // FriendlyName: A descriptive name for the dataset, if one exists. FriendlyName string `json:"friendlyName,omitempty"` // Id: The fully-qualified, unique, opaque ID of the dataset. Id string `json:"id,omitempty"` // Kind: The resource type. This property always returns the value // "bigquery#dataset". Kind string `json:"kind,omitempty"` } type DatasetReference struct { // DatasetId: [Required] A unique ID for this dataset, without the // project name. The ID must contain only letters (a-z, A-Z), numbers // (0-9), or underscores (_). 
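Putting the Dataset and DatasetAccess fields together, a hypothetical insert payload that grants a group read access and sets a default table lifetime could look like the following; all identifiers and email addresses are placeholders.

// exampleDataset is an illustrative Dataset payload, not part of the
// generated file.
var exampleDataset = &Dataset{
	DatasetReference: &DatasetReference{
		ProjectId: "my-project",
		DatasetId: "analytics",
	},
	FriendlyName:             "Analytics",
	Description:              "Example dataset with group read access",
	DefaultTableExpirationMs: 7 * 24 * 3600 * 1000, // one week, in milliseconds
	Access: []*DatasetAccess{
		{Role: "READER", GroupByEmail: "analysts@example.com"},
	},
}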
The maximum length is 1,024 characters. DatasetId string `json:"datasetId,omitempty"` // ProjectId: [Optional] The ID of the project containing this dataset. ProjectId string `json:"projectId,omitempty"` } type ErrorProto struct { // DebugInfo: Debugging information. This property is internal to Google // and should not be used. DebugInfo string `json:"debugInfo,omitempty"` // Location: Specifies where the error occurred, if present. Location string `json:"location,omitempty"` // Message: A human-readable description of the error. Message string `json:"message,omitempty"` // Reason: A short error code that summarizes the error. Reason string `json:"reason,omitempty"` } type ExternalDataConfiguration struct { // Compression: [Optional] The compression type of the data source. // Possible values include GZIP and NONE. The default value is NONE. Compression string `json:"compression,omitempty"` // CsvOptions: Additional properties to set if sourceFormat is set to // CSV. CsvOptions *CsvOptions `json:"csvOptions,omitempty"` // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow // extra values that are not represented in the table schema. If true, // the extra values are ignored. If false, records with extra columns // are treated as bad records, and if there are too many bad records, an // invalid error is returned in the job result. The default value is // false. The sourceFormat property determines what BigQuery treats as // an extra value: CSV: Trailing columns IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"` // MaxBadRecords: [Optional] The maximum number of bad records that // BigQuery can ignore when reading data. If the number of bad records // exceeds this value, an invalid error is returned in the job result. // The default value is 0, which requires that all records are valid. MaxBadRecords int64 `json:"maxBadRecords,omitempty"` // Schema: [Required] The schema for the data. Schema *TableSchema `json:"schema,omitempty"` // SourceFormat: [Optional] The data format. External data sources must // be in CSV format. The default value is CSV. SourceFormat string `json:"sourceFormat,omitempty"` // SourceUris: [Required] The fully-qualified URIs that point to your // data in Google Cloud Storage. Each URI can contain one '*' wildcard // character and it must come after the 'bucket' name. CSV limits // related to load jobs apply to external data sources, plus an // additional limit of 10 GB maximum size across all URIs. SourceUris []string `json:"sourceUris,omitempty"` } type GetQueryResultsResponse struct { // CacheHit: Whether the query result was fetched from the query cache. CacheHit bool `json:"cacheHit,omitempty"` // Etag: A hash of this response. Etag string `json:"etag,omitempty"` // JobComplete: Whether the query has completed or not. If rows or // totalRows are present, this will always be true. If this is false, // totalRows will not be available. JobComplete bool `json:"jobComplete,omitempty"` // JobReference: Reference to the BigQuery Job that was created to run // the query. This field will be present even if the original request // timed out, in which case GetQueryResults can be used to read the // results once the query has completed. Since this API only returns the // first page of results, subsequent pages can be fetched via the same // mechanism (GetQueryResults). JobReference *JobReference `json:"jobReference,omitempty"` // Kind: The resource type of the response. 
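The ExternalDataConfiguration above ties the CSV options to one or more Cloud Storage URIs so the files can be queried in place. A hypothetical configuration for gzipped CSV files (bucket path and schema are placeholders):

// externalCsvSource is an illustrative ExternalDataConfiguration, not part
// of the generated file.
var externalCsvSource = &ExternalDataConfiguration{
	SourceFormat: "CSV",
	Compression:  "GZIP",
	SourceUris:   []string{"gs://my-bucket/exports/part-*.csv.gz"},
	CsvOptions:   &CsvOptions{SkipLeadingRows: 1},
	Schema: &TableSchema{
		Fields: []*TableFieldSchema{
			{Name: "id", Type: "INTEGER", Mode: "REQUIRED"},
			{Name: "payload", Type: "STRING"},
		},
	},
	MaxBadRecords: 10, // tolerate a handful of bad rows before failing
}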
Kind string `json:"kind,omitempty"` // PageToken: A token used for paging results. PageToken string `json:"pageToken,omitempty"` // Rows: An object with as many results as can be contained within the // maximum permitted reply size. To get any additional rows, you can // call GetQueryResults and specify the jobReference returned above. // Present only when the query completes successfully. Rows []*TableRow `json:"rows,omitempty"` // Schema: The schema of the results. Present only when the query // completes successfully. Schema *TableSchema `json:"schema,omitempty"` // TotalBytesProcessed: The total number of bytes processed for this // query. TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` // TotalRows: The total number of rows in the complete query result set, // which can be more than the number of rows in this single page of // results. Present only when the query completes successfully. TotalRows uint64 `json:"totalRows,omitempty,string"` } type Job struct { // Configuration: [Required] Describes the job configuration. Configuration *JobConfiguration `json:"configuration,omitempty"` // Etag: [Output-only] A hash of this resource. Etag string `json:"etag,omitempty"` // Id: [Output-only] Opaque ID field of the job Id string `json:"id,omitempty"` // JobReference: [Optional] Reference describing the unique-per-user // name of the job. JobReference *JobReference `json:"jobReference,omitempty"` // Kind: [Output-only] The type of the resource. Kind string `json:"kind,omitempty"` // SelfLink: [Output-only] A URL that can be used to access this // resource again. SelfLink string `json:"selfLink,omitempty"` // Statistics: [Output-only] Information about the job, including // starting time and ending time of the job. Statistics *JobStatistics `json:"statistics,omitempty"` // Status: [Output-only] The status of this job. Examine this value when // polling an asynchronous job to see if the job is complete. Status *JobStatus `json:"status,omitempty"` // UserEmail: [Output-only] Email address of the user who ran the job. UserEmail string `json:"user_email,omitempty"` } type JobConfiguration struct { // Copy: [Pick one] Copies a table. Copy *JobConfigurationTableCopy `json:"copy,omitempty"` // DryRun: [Optional] If set, don't actually run this job. A valid query // will return a mostly empty response with some processing statistics, // while an invalid query will return the same error it would if it // wasn't a dry run. Behavior of non-query jobs is undefined. DryRun bool `json:"dryRun,omitempty"` // Extract: [Pick one] Configures an extract job. Extract *JobConfigurationExtract `json:"extract,omitempty"` // Link: [Pick one] Configures a link job. Link *JobConfigurationLink `json:"link,omitempty"` // Load: [Pick one] Configures a load job. Load *JobConfigurationLoad `json:"load,omitempty"` // Query: [Pick one] Configures a query job. Query *JobConfigurationQuery `json:"query,omitempty"` } type JobConfigurationExtract struct { // Compression: [Optional] The compression type to use for exported // files. Possible values include GZIP and NONE. The default value is // NONE. Compression string `json:"compression,omitempty"` // DestinationFormat: [Optional] The exported file format. Possible // values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default // value is CSV. Tables with nested or repeated fields cannot be // exported as CSV. 
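Because GetQueryResultsResponse only ever carries a single page, callers usually check JobComplete before trusting the row data and keep paging while a PageToken is returned. A small hypothetical helper illustrating that check:

// pageRows is an illustrative helper, not part of the generated file. It
// returns the rows of one GetQueryResults page plus whether another page
// should be requested with the returned PageToken.
func pageRows(resp *GetQueryResultsResponse) (rows []*TableRow, more bool, err error) {
	if resp == nil {
		return nil, false, errors.New("nil response")
	}
	if !resp.JobComplete {
		// The query is still running; rows and totalRows are not yet valid.
		return nil, false, nil
	}
	return resp.Rows, resp.PageToken != "", nil
}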
DestinationFormat string `json:"destinationFormat,omitempty"` // DestinationUri: [Pick one] DEPRECATED: Use destinationUris instead, // passing only one URI as necessary. The fully-qualified Google Cloud // Storage URI where the extracted table should be written. DestinationUri string `json:"destinationUri,omitempty"` // DestinationUris: [Pick one] A list of fully-qualified Google Cloud // Storage URIs where the extracted table should be written. DestinationUris []string `json:"destinationUris,omitempty"` // FieldDelimiter: [Optional] Delimiter to use between fields in the // exported data. Default is ',' FieldDelimiter string `json:"fieldDelimiter,omitempty"` // PrintHeader: [Optional] Whether to print out a header row in the // results. Default is true. PrintHeader bool `json:"printHeader,omitempty"` // SourceTable: [Required] A reference to the table being exported. SourceTable *TableReference `json:"sourceTable,omitempty"` } type JobConfigurationLink struct { // CreateDisposition: [Optional] Specifies whether the job is allowed to // create new tables. The following values are supported: // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the // table. CREATE_NEVER: The table must already exist. If it does not, a // 'notFound' error is returned in the job result. The default value is // CREATE_IF_NEEDED. Creation, truncation and append actions occur as // one atomic update upon job completion. CreateDisposition string `json:"createDisposition,omitempty"` // DestinationTable: [Required] The destination table of the link job. DestinationTable *TableReference `json:"destinationTable,omitempty"` // SourceUri: [Required] URI of source table to link. SourceUri []string `json:"sourceUri,omitempty"` // WriteDisposition: [Optional] Specifies the action that occurs if the // destination table already exists. The following values are supported: // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the // table data. WRITE_APPEND: If the table already exists, BigQuery // appends the data to the table. WRITE_EMPTY: If the table already // exists and contains data, a 'duplicate' error is returned in the job // result. The default value is WRITE_EMPTY. Each action is atomic and // only occurs if BigQuery is able to complete the job successfully. // Creation, truncation and append actions occur as one atomic update // upon job completion. WriteDisposition string `json:"writeDisposition,omitempty"` } type JobConfigurationLoad struct { // AllowJaggedRows: [Optional] Accept rows that are missing trailing // optional columns. The missing values are treated as nulls. If false, // records with missing trailing columns are treated as bad records, and // if there are too many bad records, an invalid error is returned in // the job result. The default value is false. Only applicable to CSV, // ignored for other formats. AllowJaggedRows bool `json:"allowJaggedRows,omitempty"` // AllowQuotedNewlines: Indicates if BigQuery should allow quoted data // sections that contain newline characters in a CSV file. The default // value is false. AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"` // CreateDisposition: [Optional] Specifies whether the job is allowed to // create new tables. The following values are supported: // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the // table. CREATE_NEVER: The table must already exist. If it does not, a // 'notFound' error is returned in the job result. The default value is // CREATE_IF_NEEDED. 
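An extract configuration wires a source table to one or more Cloud Storage destinations. A hypothetical gzipped-CSV export is sketched below; the table and bucket names are placeholders, and submitting the job through the Jobs collection is outside this excerpt.

// exampleExtract is an illustrative JobConfigurationExtract value, not
// part of the generated file.
var exampleExtract = &JobConfigurationExtract{
	SourceTable: &TableReference{
		ProjectId: "my-project",
		DatasetId: "analytics",
		TableId:   "events",
	},
	DestinationUris:   []string{"gs://my-bucket/exports/events-*.csv.gz"},
	DestinationFormat: "CSV",
	Compression:       "GZIP",
	FieldDelimiter:    ",",
	PrintHeader:       true,
}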
Creation, truncation and append actions occur as // one atomic update upon job completion. CreateDisposition string `json:"createDisposition,omitempty"` // DestinationTable: [Required] The destination table to load the data // into. DestinationTable *TableReference `json:"destinationTable,omitempty"` // Encoding: [Optional] The character encoding of the data. The // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. // BigQuery decodes the data after the raw, binary data has been split // using the values of the quote and fieldDelimiter properties. Encoding string `json:"encoding,omitempty"` // FieldDelimiter: [Optional] The separator for fields in a CSV file. // BigQuery converts the string to ISO-8859-1 encoding, and then uses // the first byte of the encoded string to split the data in its raw, // binary state. BigQuery also supports the escape sequence "\t" to // specify a tab separator. The default value is a comma (','). FieldDelimiter string `json:"fieldDelimiter,omitempty"` // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow // extra values that are not represented in the table schema. If true, // the extra values are ignored. If false, records with extra columns // are treated as bad records, and if there are too many bad records, an // invalid error is returned in the job result. The default value is // false. The sourceFormat property determines what BigQuery treats as // an extra value: CSV: Trailing columns JSON: Named values that don't // match any column names IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"` // MaxBadRecords: [Optional] The maximum number of bad records that // BigQuery can ignore when running the job. If the number of bad // records exceeds this value, an invalid error is returned in the job // result. The default value is 0, which requires that all records are // valid. MaxBadRecords int64 `json:"maxBadRecords,omitempty"` // ProjectionFields: [Experimental] If sourceFormat is set to // "DATASTORE_BACKUP", indicates which entity properties to load into // BigQuery from a Cloud Datastore backup. Property names are case // sensitive and must be top-level properties. If no properties are // specified, BigQuery loads all properties. If any named property isn't // found in the Cloud Datastore backup, an invalid error is returned in // the job result. ProjectionFields []string `json:"projectionFields,omitempty"` // Quote: [Optional] The value that is used to quote data sections in a // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and // then uses the first byte of the encoded string to split the data in // its raw, binary state. The default value is a double-quote ('"'). If // your data does not contain quoted sections, set the property value to // an empty string. If your data contains quoted newline characters, you // must also set the allowQuotedNewlines property to true. Quote string `json:"quote,omitempty"` // Schema: [Optional] The schema for the destination table. The schema // can be omitted if the destination table already exists or if the // schema can be inferred from the loaded data. Schema *TableSchema `json:"schema,omitempty"` // SchemaInline: [Deprecated] The inline schema. For CSV schemas, // specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, // bar:INTEGER, baz:FLOAT". SchemaInline string `json:"schemaInline,omitempty"` // SchemaInlineFormat: [Deprecated] The format of the schemaInline // property. 
SchemaInlineFormat string `json:"schemaInlineFormat,omitempty"` // SkipLeadingRows: [Optional] The number of rows at the top of a CSV // file that BigQuery will skip when loading the data. The default value // is 0. This property is useful if you have header rows in the file // that should be skipped. SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"` // SourceFormat: [Optional] The format of the data files. For CSV files, // specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For // newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default // value is CSV. SourceFormat string `json:"sourceFormat,omitempty"` // SourceUris: [Required] The fully-qualified URIs that point to your // data in Google Cloud Storage. Each URI can contain one '*' wildcard // character and it must come after the 'bucket' name. SourceUris []string `json:"sourceUris,omitempty"` // WriteDisposition: [Optional] Specifies the action that occurs if the // destination table already exists. The following values are supported: // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the // table data. WRITE_APPEND: If the table already exists, BigQuery // appends the data to the table. WRITE_EMPTY: If the table already // exists and contains data, a 'duplicate' error is returned in the job // result. The default value is WRITE_EMPTY. Each action is atomic and // only occurs if BigQuery is able to complete the job successfully. // Creation, truncation and append actions occur as one atomic update // upon job completion. WriteDisposition string `json:"writeDisposition,omitempty"` } type JobConfigurationQuery struct { // AllowLargeResults: If true, allows the query to produce arbitrarily // large result tables at a slight cost in performance. Requires // destinationTable to be set. AllowLargeResults bool `json:"allowLargeResults,omitempty"` // CreateDisposition: [Optional] Specifies whether the job is allowed to // create new tables. The following values are supported: // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the // table. CREATE_NEVER: The table must already exist. If it does not, a // 'notFound' error is returned in the job result. The default value is // CREATE_IF_NEEDED. Creation, truncation and append actions occur as // one atomic update upon job completion. CreateDisposition string `json:"createDisposition,omitempty"` // DefaultDataset: [Optional] Specifies the default dataset to use for // unqualified table names in the query. DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"` // DestinationTable: [Optional] Describes the table where the query // results should be stored. If not present, a new table will be created // to store the results. DestinationTable *TableReference `json:"destinationTable,omitempty"` // FlattenResults: [Optional] Flattens all nested and repeated fields in // the query results. The default value is true. allowLargeResults must // be true if this is set to false. FlattenResults bool `json:"flattenResults,omitempty"` // PreserveNulls: [Deprecated] This property is deprecated. PreserveNulls bool `json:"preserveNulls,omitempty"` // Priority: [Optional] Specifies a priority for the query. Possible // values include INTERACTIVE and BATCH. The default value is // INTERACTIVE. Priority string `json:"priority,omitempty"` // Query: [Required] BigQuery SQL query to execute. 
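Pulling the JobConfigurationLoad fields together, a hypothetical load of headered CSV files from Cloud Storage into a table might be described as follows; URIs, IDs and the schema are placeholders, and job submission is not shown here.

// exampleLoad is an illustrative JobConfigurationLoad value, not part of
// the generated file.
var exampleLoad = &JobConfigurationLoad{
	SourceUris:      []string{"gs://my-bucket/uploads/users-*.csv"},
	SourceFormat:    "CSV",
	SkipLeadingRows: 1, // skip the header row
	Schema: &TableSchema{
		Fields: []*TableFieldSchema{
			{Name: "user_id", Type: "INTEGER", Mode: "REQUIRED"},
			{Name: "email", Type: "STRING"},
		},
	},
	DestinationTable: &TableReference{
		ProjectId: "my-project",
		DatasetId: "analytics",
		TableId:   "users",
	},
	CreateDisposition: "CREATE_IF_NEEDED",
	WriteDisposition:  "WRITE_TRUNCATE",
	MaxBadRecords:     5,
}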
Query string `json:"query,omitempty"` // TableDefinitions: [Experimental] If querying an external data source // outside of BigQuery, describes the data format, location and other // properties of the data source. By defining these properties, the data // source can then be queried as if it were a standard BigQuery table. TableDefinitions map[string]ExternalDataConfiguration `json:"tableDefinitions,omitempty"` // UseQueryCache: [Optional] Whether to look for the result in the query // cache. The query cache is a best-effort cache that will be flushed // whenever tables in the query are modified. Moreover, the query cache // is only available when a query does not have a destination table // specified. UseQueryCache bool `json:"useQueryCache,omitempty"` // WriteDisposition: [Optional] Specifies the action that occurs if the // destination table already exists. The following values are supported: // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the // table data. WRITE_APPEND: If the table already exists, BigQuery // appends the data to the table. WRITE_EMPTY: If the table already // exists and contains data, a 'duplicate' error is returned in the job // result. The default value is WRITE_EMPTY. Each action is atomic and // only occurs if BigQuery is able to complete the job successfully. // Creation, truncation and append actions occur as one atomic update // upon job completion. WriteDisposition string `json:"writeDisposition,omitempty"` } type JobConfigurationTableCopy struct { // CreateDisposition: [Optional] Specifies whether the job is allowed to // create new tables. The following values are supported: // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the // table. CREATE_NEVER: The table must already exist. If it does not, a // 'notFound' error is returned in the job result. The default value is // CREATE_IF_NEEDED. Creation, truncation and append actions occur as // one atomic update upon job completion. CreateDisposition string `json:"createDisposition,omitempty"` // DestinationTable: [Required] The destination table DestinationTable *TableReference `json:"destinationTable,omitempty"` // SourceTable: [Pick one] Source table to copy. SourceTable *TableReference `json:"sourceTable,omitempty"` // SourceTables: [Pick one] Source tables to copy. SourceTables []*TableReference `json:"sourceTables,omitempty"` // WriteDisposition: [Optional] Specifies the action that occurs if the // destination table already exists. The following values are supported: // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the // table data. WRITE_APPEND: If the table already exists, BigQuery // appends the data to the table. WRITE_EMPTY: If the table already // exists and contains data, a 'duplicate' error is returned in the job // result. The default value is WRITE_EMPTY. Each action is atomic and // only occurs if BigQuery is able to complete the job successfully. // Creation, truncation and append actions occur as one atomic update // upon job completion. WriteDisposition string `json:"writeDisposition,omitempty"` } type JobList struct { // Etag: A hash of this page of results. Etag string `json:"etag,omitempty"` // Jobs: List of jobs that were requested. Jobs []*JobListJobs `json:"jobs,omitempty"` // Kind: The resource type of the response. Kind string `json:"kind,omitempty"` // NextPageToken: A token to request the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // TotalItems: Total number of jobs in this collection. 
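A query job that materializes its result into a destination table combines several of the options above. A hypothetical configuration, wrapped in the Job envelope it would be submitted in (the SQL and all identifiers are placeholders; how the job is submitted through the Jobs service is outside this excerpt):

// exampleQueryJob is an illustrative Job value, not part of the generated
// file.
var exampleQueryJob = &Job{
	Configuration: &JobConfiguration{
		Query: &JobConfigurationQuery{
			Query:             "SELECT domain, COUNT(*) AS uploads FROM [my-project:analytics.events] GROUP BY domain",
			DestinationTable:  &TableReference{ProjectId: "my-project", DatasetId: "analytics", TableId: "uploads_by_domain"},
			AllowLargeResults: true, // required for arbitrarily large result tables
			WriteDisposition:  "WRITE_TRUNCATE",
			Priority:          "BATCH",
			UseQueryCache:     true,
		},
	},
}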
TotalItems int64 `json:"totalItems,omitempty"` } type JobListJobs struct { // Configuration: [Full-projection-only] Specifies the job // configuration. Configuration *JobConfiguration `json:"configuration,omitempty"` // ErrorResult: A result object that will be present only if the job has // failed. ErrorResult *ErrorProto `json:"errorResult,omitempty"` // Id: Unique opaque ID of the job. Id string `json:"id,omitempty"` // JobReference: Job reference uniquely identifying the job. JobReference *JobReference `json:"jobReference,omitempty"` // Kind: The resource type. Kind string `json:"kind,omitempty"` // State: Running state of the job. When the state is DONE, errorResult // can be checked to determine whether the job succeeded or failed. State string `json:"state,omitempty"` // Statistics: [Output-only] Information about the job, including // starting time and ending time of the job. Statistics *JobStatistics `json:"statistics,omitempty"` // Status: [Full-projection-only] Describes the state of the job. Status *JobStatus `json:"status,omitempty"` // UserEmail: [Full-projection-only] Email address of the user who ran // the job. UserEmail string `json:"user_email,omitempty"` } type JobReference struct { // JobId: [Required] The ID of the job. The ID must contain only letters // (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The // maximum length is 1,024 characters. JobId string `json:"jobId,omitempty"` // ProjectId: [Required] The ID of the project containing this job. ProjectId string `json:"projectId,omitempty"` } type JobStatistics struct { // CreationTime: [Output-only] Creation time of this job, in // milliseconds since the epoch. This field will be present on all jobs. CreationTime int64 `json:"creationTime,omitempty,string"` // EndTime: [Output-only] End time of this job, in milliseconds since // the epoch. This field will be present whenever a job is in the DONE // state. EndTime int64 `json:"endTime,omitempty,string"` // Extract: [Output-only] Statistics for an extract job. Extract *JobStatistics4 `json:"extract,omitempty"` // Load: [Output-only] Statistics for a load job. Load *JobStatistics3 `json:"load,omitempty"` // Query: [Output-only] Statistics for a query job. Query *JobStatistics2 `json:"query,omitempty"` // StartTime: [Output-only] Start time of this job, in milliseconds // since the epoch. This field will be present when the job transitions // from the PENDING state to either RUNNING or DONE. StartTime int64 `json:"startTime,omitempty,string"` // TotalBytesProcessed: [Output-only] [Deprecated] Use the bytes // processed in the query statistics instead. TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` } type JobStatistics2 struct { // CacheHit: [Output-only] Whether the query result was fetched from the // query cache. CacheHit bool `json:"cacheHit,omitempty"` // TotalBytesProcessed: [Output-only] Total bytes processed for this // job. TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` } type JobStatistics3 struct { // InputFileBytes: [Output-only] Number of bytes of source data in a // load job. InputFileBytes int64 `json:"inputFileBytes,omitempty,string"` // InputFiles: [Output-only] Number of source files in a load job. InputFiles int64 `json:"inputFiles,omitempty,string"` // OutputBytes: [Output-only] Size of the loaded data in bytes. Note // that while an import job is in the running state, this value may // change.
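When listing jobs, the per-job State and ErrorResult fields are what callers usually inspect, since a DONE job may still have failed. A small hypothetical helper that counts failed jobs in one JobList page:

// countFailedJobs is an illustrative helper, not part of the generated
// file: it walks one page of a JobList and counts DONE jobs that ended
// with an error result.
func countFailedJobs(list *JobList) int {
	failed := 0
	for _, j := range list.Jobs {
		if j.State == "DONE" && j.ErrorResult != nil {
			failed++
		}
	}
	return failed
}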
OutputBytes int64 `json:"outputBytes,omitempty,string"` // OutputRows: [Output-only] Number of rows imported in a load job. Note // that while an import job is in the running state, this value may // change. OutputRows int64 `json:"outputRows,omitempty,string"` } type JobStatistics4 struct { // DestinationUriFileCounts: [Experimental] Number of files per // destination URI or URI pattern specified in the extract // configuration. These values will be in the same order as the URIs // specified in the 'destinationUris' field. DestinationUriFileCounts googleapi.Int64s `json:"destinationUriFileCounts,omitempty"` } type JobStatus struct { // ErrorResult: [Output-only] Final error result of the job. If present, // indicates that the job has completed and was unsuccessful. ErrorResult *ErrorProto `json:"errorResult,omitempty"` // Errors: [Output-only] All errors encountered during the running of // the job. Errors here do not necessarily mean that the job has // completed or was unsuccessful. Errors []*ErrorProto `json:"errors,omitempty"` // State: [Output-only] Running state of the job. State string `json:"state,omitempty"` } type JsonValue interface{} type ProjectList struct { // Etag: A hash of the page of results Etag string `json:"etag,omitempty"` // Kind: The type of list. Kind string `json:"kind,omitempty"` // NextPageToken: A token to request the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Projects: Projects to which you have at least READ access. Projects []*ProjectListProjects `json:"projects,omitempty"` // TotalItems: The total number of projects in the list. TotalItems int64 `json:"totalItems,omitempty"` } type ProjectListProjects struct { // FriendlyName: A descriptive name for this project. FriendlyName string `json:"friendlyName,omitempty"` // Id: An opaque ID of this project. Id string `json:"id,omitempty"` // Kind: The resource type. Kind string `json:"kind,omitempty"` // NumericId: The numeric ID of this project. NumericId uint64 `json:"numericId,omitempty,string"` // ProjectReference: A unique reference to this project. ProjectReference *ProjectReference `json:"projectReference,omitempty"` } type ProjectReference struct { // ProjectId: [Required] ID of the project. Can be either the numeric ID // or the assigned ID of the project. ProjectId string `json:"projectId,omitempty"` } type QueryRequest struct { // DefaultDataset: [Optional] Specifies the default datasetId and // projectId to assume for any unqualified table names in the query. If // not set, all table names in the query string must be qualified in the // format 'datasetId.tableId'. DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"` // DryRun: [Optional] If set, don't actually run this job. A valid query // will return a mostly empty response with some processing statistics, // while an invalid query will return the same error it would if it // wasn't a dry run. DryRun bool `json:"dryRun,omitempty"` // Kind: The resource type of the request. Kind string `json:"kind,omitempty"` // MaxResults: [Optional] The maximum number of rows of data to return // per page of results. Setting this flag to a small value such as 1000 // and then paging through results might improve reliability when the // query result set is large. In addition to this limit, responses are // also limited to 10 MB. By default, there is no maximum row count, and // only the byte limit applies. MaxResults int64 `json:"maxResults,omitempty"` // PreserveNulls: [Deprecated] This property is deprecated. 
PreserveNulls bool `json:"preserveNulls,omitempty"` // Query: [Required] A query string, following the BigQuery query // syntax, of the query to execute. Example: "SELECT count(f1) FROM // [myProjectId:myDatasetId.myTableId]". Query string `json:"query,omitempty"` // TimeoutMs: [Optional] How long to wait for the query to complete, in // milliseconds, before the request times out and returns. Note that // this is only a timeout for the request, not the query. If the query // takes longer to run than the timeout value, the call returns without // any results and with the 'jobComplete' flag set to false. You can // call GetQueryResults() to wait for the query to complete and read the // results. The default value is 10000 milliseconds (10 seconds). TimeoutMs int64 `json:"timeoutMs,omitempty"` // UseQueryCache: [Optional] Whether to look for the result in the query // cache. The query cache is a best-effort cache that will be flushed // whenever tables in the query are modified. The default value is true. UseQueryCache bool `json:"useQueryCache,omitempty"` } type QueryResponse struct { // CacheHit: Whether the query result was fetched from the query cache. CacheHit bool `json:"cacheHit,omitempty"` // JobComplete: Whether the query has completed or not. If rows or // totalRows are present, this will always be true. If this is false, // totalRows will not be available. JobComplete bool `json:"jobComplete,omitempty"` // JobReference: Reference to the Job that was created to run the query. // This field will be present even if the original request timed out, in // which case GetQueryResults can be used to read the results once the // query has completed. Since this API only returns the first page of // results, subsequent pages can be fetched via the same mechanism // (GetQueryResults). JobReference *JobReference `json:"jobReference,omitempty"` // Kind: The resource type. Kind string `json:"kind,omitempty"` // PageToken: A token used for paging results. PageToken string `json:"pageToken,omitempty"` // Rows: An object with as many results as can be contained within the // maximum permitted reply size. To get any additional rows, you can // call GetQueryResults and specify the jobReference returned above. Rows []*TableRow `json:"rows,omitempty"` // Schema: The schema of the results. Present only when the query // completes successfully. Schema *TableSchema `json:"schema,omitempty"` // TotalBytesProcessed: The total number of bytes processed for this // query. If this query was a dry run, this is the number of bytes that // would be processed if the query were run. TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` // TotalRows: The total number of rows in the complete query result set, // which can be more than the number of rows in this single page of // results. TotalRows uint64 `json:"totalRows,omitempty,string"` } type Table struct { // CreationTime: [Output-only] The time when this table was created, in // milliseconds since the epoch. CreationTime int64 `json:"creationTime,omitempty,string"` // Description: [Optional] A user-friendly description of this table. Description string `json:"description,omitempty"` // Etag: [Output-only] A hash of this resource. Etag string `json:"etag,omitempty"` // ExpirationTime: [Optional] The time when this table expires, in // milliseconds since the epoch. If not present, the table will persist // indefinitely. Expired tables will be deleted and their storage // reclaimed. 
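For the synchronous query path, the QueryRequest above carries the SQL plus paging and timeout knobs. A hypothetical request that waits up to 30 seconds and caps each page at 500 rows (dataset and SQL are placeholders):

// exampleQueryRequest is an illustrative QueryRequest, not part of the
// generated file.
var exampleQueryRequest = &QueryRequest{
	Query:          "SELECT ip, COUNT(*) AS hits FROM events GROUP BY ip",
	DefaultDataset: &DatasetReference{ProjectId: "my-project", DatasetId: "analytics"},
	MaxResults:     500,
	TimeoutMs:      30000, // if jobComplete comes back false, keep polling for results
	UseQueryCache:  true,
}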
ExpirationTime int64 `json:"expirationTime,omitempty,string"` // FriendlyName: [Optional] A descriptive name for this table. FriendlyName string `json:"friendlyName,omitempty"` // Id: [Output-only] An opaque ID uniquely identifying the table. Id string `json:"id,omitempty"` // Kind: [Output-only] The type of the resource. Kind string `json:"kind,omitempty"` // LastModifiedTime: [Output-only] The time when this table was last // modified, in milliseconds since the epoch. LastModifiedTime uint64 `json:"lastModifiedTime,omitempty,string"` // NumBytes: [Output-only] The size of the table in bytes. This property // is unavailable for tables that are actively receiving streaming // inserts. NumBytes int64 `json:"numBytes,omitempty,string"` // NumRows: [Output-only] The number of rows of data in this table. This // property is unavailable for tables that are actively receiving // streaming inserts. NumRows uint64 `json:"numRows,omitempty,string"` // Schema: [Optional] Describes the schema of this table. Schema *TableSchema `json:"schema,omitempty"` // SelfLink: [Output-only] A URL that can be used to access this // resource again. SelfLink string `json:"selfLink,omitempty"` // TableReference: [Required] Reference describing the ID of this table. TableReference *TableReference `json:"tableReference,omitempty"` // Type: [Output-only] Describes the table type. The following values // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table // defined by a SQL query. The default value is TABLE. Type string `json:"type,omitempty"` // View: [Optional] The view definition. View *ViewDefinition `json:"view,omitempty"` } type TableCell struct { V interface{} `json:"v,omitempty"` } type TableDataInsertAllRequest struct { // IgnoreUnknownValues: [Optional] Accept rows that contain values that // do not match the schema. The unknown values are ignored. Default is // false, which treats unknown values as errors. IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"` // Kind: The resource type of the response. Kind string `json:"kind,omitempty"` // Rows: The rows to insert. Rows []*TableDataInsertAllRequestRows `json:"rows,omitempty"` // SkipInvalidRows: [Optional] Insert all valid rows of a request, even // if invalid rows exist. The default value is false, which causes the // entire request to fail if any invalid rows exist. SkipInvalidRows bool `json:"skipInvalidRows,omitempty"` } type TableDataInsertAllRequestRows struct { // InsertId: [Optional] A unique ID for each row. BigQuery uses this // property to detect duplicate insertion requests on a best-effort // basis. InsertId string `json:"insertId,omitempty"` // Json: [Required] A JSON object that contains a row of data. The // object's properties and values must match the destination table's // schema. Json map[string]JsonValue `json:"json,omitempty"` } type TableDataInsertAllResponse struct { // InsertErrors: An array of errors for rows that were not inserted. InsertErrors []*TableDataInsertAllResponseInsertErrors `json:"insertErrors,omitempty"` // Kind: The resource type of the response. Kind string `json:"kind,omitempty"` } type TableDataInsertAllResponseInsertErrors struct { // Errors: Error information for the row indicated by the index // property. Errors []*ErrorProto `json:"errors,omitempty"` // Index: The index of the row that error applies to. Index int64 `json:"index,omitempty"` } type TableDataList struct { // Etag: A hash of this page of results. 
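Streaming inserts are expressed as TableDataInsertAllRequest rows, each an arbitrary JSON object keyed by column name, and per-row failures come back in InsertErrors; the request itself is what the tabledata.insertAll method in the discovery document above accepts. A hypothetical builder and error check (helper names are placeholders):

// buildInsertAll is an illustrative helper, not part of the generated
// file: it wraps row maps in a TableDataInsertAllRequest, using a caller
// supplied ID per row for best-effort deduplication.
func buildInsertAll(ids []string, rows []map[string]JsonValue) *TableDataInsertAllRequest {
	req := &TableDataInsertAllRequest{SkipInvalidRows: true}
	for i, r := range rows {
		req.Rows = append(req.Rows, &TableDataInsertAllRequestRows{
			InsertId: ids[i],
			Json:     r,
		})
	}
	return req
}

// insertAllFailed reports whether a TableDataInsertAllResponse contains
// any per-row errors (also illustrative).
func insertAllFailed(resp *TableDataInsertAllResponse) bool {
	return resp != nil && len(resp.InsertErrors) > 0
}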
Etag string `json:"etag,omitempty"` // Kind: The resource type of the response. Kind string `json:"kind,omitempty"` // PageToken: A token used for paging results. Providing this token // instead of the startIndex parameter can help you retrieve stable // results when an underlying table is changing. PageToken string `json:"pageToken,omitempty"` // Rows: Rows of results. Rows []*TableRow `json:"rows,omitempty"` // TotalRows: The total number of rows in the complete table. TotalRows int64 `json:"totalRows,omitempty,string"` } type TableFieldSchema struct { // Description: [Optional] The field description. The maximum length is // 16K characters. Description string `json:"description,omitempty"` // Fields: [Optional] Describes the nested schema fields if the type // property is set to RECORD. Fields []*TableFieldSchema `json:"fields,omitempty"` // Mode: [Optional] The field mode. Possible values include NULLABLE, // REQUIRED and REPEATED. The default value is NULLABLE. Mode string `json:"mode,omitempty"` // Name: [Required] The field name. The name must contain only letters // (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a // letter or underscore. The maximum length is 128 characters. Name string `json:"name,omitempty"` // Type: [Required] The field data type. Possible values include STRING, // INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates // that the field contains a nested schema). Type string `json:"type,omitempty"` } type TableList struct { // Etag: A hash of this page of results. Etag string `json:"etag,omitempty"` // Kind: The type of list. Kind string `json:"kind,omitempty"` // NextPageToken: A token to request the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Tables: Tables in the requested dataset. Tables []*TableListTables `json:"tables,omitempty"` // TotalItems: The total number of tables in the dataset. TotalItems int64 `json:"totalItems,omitempty"` } type TableListTables struct { // FriendlyName: The user-friendly name for this table. FriendlyName string `json:"friendlyName,omitempty"` // Id: An opaque ID of the table Id string `json:"id,omitempty"` // Kind: The resource type. Kind string `json:"kind,omitempty"` // TableReference: A reference uniquely identifying the table. TableReference *TableReference `json:"tableReference,omitempty"` // Type: The type of table. Possible values are: TABLE, VIEW. Type string `json:"type,omitempty"` } type TableReference struct { // DatasetId: [Required] The ID of the dataset containing this table. DatasetId string `json:"datasetId,omitempty"` // ProjectId: [Required] The ID of the project containing this table. ProjectId string `json:"projectId,omitempty"` // TableId: [Required] The ID of the table. The ID must contain only // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum // length is 1,024 characters. TableId string `json:"tableId,omitempty"` } type TableRow struct { F []*TableCell `json:"f,omitempty"` } type TableSchema struct { // Fields: Describes the fields in a table. Fields []*TableFieldSchema `json:"fields,omitempty"` } type ViewDefinition struct { // Query: [Required] A query that BigQuery executes when the view is // referenced. Query string `json:"query,omitempty"` } // method id "bigquery.datasets.delete": type DatasetsDeleteCall struct { s *Service projectId string datasetId string opt_ map[string]interface{} } // Delete: Deletes the dataset specified by the datasetId value. 
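Nested schemas are expressed by setting a field's Type to RECORD and filling its Fields slice. A hypothetical Table payload with a repeated record column, of the shape accepted by the tables.insert method in the discovery document above (all identifiers are placeholders):

// exampleTable is an illustrative Table payload, not part of the generated
// file.
var exampleTable = &Table{
	TableReference: &TableReference{ProjectId: "my-project", DatasetId: "analytics", TableId: "uploads"},
	Description:    "Uploads with per-file thumbnail records",
	Schema: &TableSchema{
		Fields: []*TableFieldSchema{
			{Name: "upload_id", Type: "STRING", Mode: "REQUIRED"},
			{Name: "created", Type: "TIMESTAMP"},
			{
				Name: "thumbnails",
				Type: "RECORD",
				Mode: "REPEATED",
				Fields: []*TableFieldSchema{
					{Name: "width", Type: "INTEGER"},
					{Name: "height", Type: "INTEGER"},
				},
			},
		},
	},
}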
Before // you can delete a dataset, you must delete all its tables, either // manually or by specifying deleteContents. Immediately after deletion, // you can create another dataset with the same name. func (r *DatasetsService) Delete(projectId string, datasetId string) *DatasetsDeleteCall { c := &DatasetsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId return c } // DeleteContents sets the optional parameter "deleteContents": If True, // delete all the tables in the dataset. If False and the dataset // contains tables, the request will fail. Default is False func (c *DatasetsDeleteCall) DeleteContents(deleteContents bool) *DatasetsDeleteCall { c.opt_["deleteContents"] = deleteContents return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DatasetsDeleteCall) Fields(s ...googleapi.Field) *DatasetsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DatasetsDeleteCall) Do() error { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["deleteContents"]; ok { params.Set("deleteContents", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") urls += "?" + params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.", // "httpMethod": "DELETE", // "id": "bigquery.datasets.delete", // "parameterOrder": [ // "projectId", // "datasetId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of dataset being deleted", // "location": "path", // "required": true, // "type": "string" // }, // "deleteContents": { // "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False", // "location": "query", // "type": "boolean" // }, // "projectId": { // "description": "Project ID of the dataset being deleted", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}", // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.datasets.get": type DatasetsGetCall struct { s *Service projectId string datasetId string opt_ map[string]interface{} } // Get: Returns the dataset specified by datasetID. func (r *DatasetsService) Get(projectId string, datasetId string) *DatasetsGetCall { c := &DatasetsGetCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
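Every generated call follows the same pattern: a constructor on the service, chainable setters for optional parameters, then Do. For example, deleting a dataset together with any tables it still contains uses the Delete and DeleteContents methods defined above (a sketch with placeholder IDs):

// dropDataset is an illustrative helper, not part of the generated file:
// it deletes a dataset and its remaining tables via the DatasetsService
// call builder defined above.
func dropDataset(svc *Service, projectID, datasetID string) error {
	return svc.Datasets.Delete(projectID, datasetID).DeleteContents(true).Do()
}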
func (c *DatasetsGetCall) Fields(s ...googleapi.Field) *DatasetsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DatasetsGetCall) Do() (*Dataset, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Dataset if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Returns the dataset specified by datasetID.", // "httpMethod": "GET", // "id": "bigquery.datasets.get", // "parameterOrder": [ // "projectId", // "datasetId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the requested dataset", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the requested dataset", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}", // "response": { // "$ref": "Dataset" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.datasets.insert": type DatasetsInsertCall struct { s *Service projectId string dataset *Dataset opt_ map[string]interface{} } // Insert: Creates a new empty dataset. func (r *DatasetsService) Insert(projectId string, dataset *Dataset) *DatasetsInsertCall { c := &DatasetsInsertCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.dataset = dataset return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DatasetsInsertCall) Fields(s ...googleapi.Field) *DatasetsInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DatasetsInsertCall) Do() (*Dataset, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Dataset if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new empty dataset.", // "httpMethod": "POST", // "id": "bigquery.datasets.insert", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "Project ID of the new dataset", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets", // "request": { // "$ref": "Dataset" // }, // "response": { // "$ref": "Dataset" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.datasets.list": type DatasetsListCall struct { s *Service projectId string opt_ map[string]interface{} } // List: Lists all datasets in the specified project to which you have // been granted the READER dataset role. func (r *DatasetsService) List(projectId string) *DatasetsListCall { c := &DatasetsListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId return c } // All sets the optional parameter "all": Whether to list all datasets, // including hidden ones func (c *DatasetsListCall) All(all bool) *DatasetsListCall { c.opt_["all"] = all return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results to return func (c *DatasetsListCall) MaxResults(maxResults int64) *DatasetsListCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": Page token, // returned by a previous call, to request the next page of results func (c *DatasetsListCall) PageToken(pageToken string) *DatasetsListCall { c.opt_["pageToken"] = pageToken return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DatasetsListCall) Fields(s ...googleapi.Field) *DatasetsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DatasetsListCall) Do() (*DatasetList, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["all"]; ok { params.Set("all", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *DatasetList if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.", // "httpMethod": "GET", // "id": "bigquery.datasets.list", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "all": { // "description": "Whether to list all datasets, including hidden ones", // "location": "query", // "type": "boolean" // }, // "maxResults": { // "description": "The maximum number of results to return", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "Page token, returned by a previous call, to request the next page of results", // "location": "query", // "type": "string" // }, // "projectId": { // "description": "Project ID of the datasets to be listed", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets", // "response": { // "$ref": "DatasetList" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.datasets.patch": type DatasetsPatchCall struct { s *Service projectId string datasetId string dataset *Dataset opt_ map[string]interface{} } // Patch: Updates information in an existing dataset. The update method // replaces the entire dataset resource, whereas the patch method only // replaces fields that are provided in the submitted dataset resource. // This method supports patch semantics. func (r *DatasetsService) Patch(projectId string, datasetId string, dataset *Dataset) *DatasetsPatchCall { c := &DatasetsPatchCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.dataset = dataset return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DatasetsPatchCall) Fields(s ...googleapi.Field) *DatasetsPatchCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DatasetsPatchCall) Do() (*Dataset, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") urls += "?" + params.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Dataset if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates information in an existing dataset. 
The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "bigquery.datasets.patch", // "parameterOrder": [ // "projectId", // "datasetId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the dataset being updated", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the dataset being updated", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}", // "request": { // "$ref": "Dataset" // }, // "response": { // "$ref": "Dataset" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.datasets.update": type DatasetsUpdateCall struct { s *Service projectId string datasetId string dataset *Dataset opt_ map[string]interface{} } // Update: Updates information in an existing dataset. The update method // replaces the entire dataset resource, whereas the patch method only // replaces fields that are provided in the submitted dataset resource. func (r *DatasetsService) Update(projectId string, datasetId string, dataset *Dataset) *DatasetsUpdateCall { c := &DatasetsUpdateCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.dataset = dataset return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DatasetsUpdateCall) Fields(s ...googleapi.Field) *DatasetsUpdateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DatasetsUpdateCall) Do() (*Dataset, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") urls += "?" + params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Dataset if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates information in an existing dataset. 
The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.", // "httpMethod": "PUT", // "id": "bigquery.datasets.update", // "parameterOrder": [ // "projectId", // "datasetId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the dataset being updated", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the dataset being updated", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}", // "request": { // "$ref": "Dataset" // }, // "response": { // "$ref": "Dataset" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.jobs.get": type JobsGetCall struct { s *Service projectId string jobId string opt_ map[string]interface{} } // Get: Returns information about a specific job. Job information is // available for a six month period after creation. Requires that you're // the person who ran the job, or have the Is Owner project role. func (r *JobsService) Get(projectId string, jobId string) *JobsGetCall { c := &JobsGetCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.jobId = jobId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *JobsGetCall) Fields(s ...googleapi.Field) *JobsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *JobsGetCall) Do() (*Job, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs/{jobId}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "jobId": c.jobId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Job if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.", // "httpMethod": "GET", // "id": "bigquery.jobs.get", // "parameterOrder": [ // "projectId", // "jobId" // ], // "parameters": { // "jobId": { // "description": "Job ID of the requested job", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the requested job", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/jobs/{jobId}", // "response": { // "$ref": "Job" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.jobs.getQueryResults": type JobsGetQueryResultsCall struct { s *Service projectId string jobId string opt_ map[string]interface{} } // GetQueryResults: Retrieves the results of a query job. 
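// Illustrative sketch, not part of the generated source: polling a job with the
// Jobs.Get call defined above until BigQuery reports it finished. Assumes a
// *bigquery.Service built as in the earlier sketch, an extra "time" import, and
// that the Job resource exposes Status.State (an assumption; that struct is
// defined elsewhere in this file).
func waitForJob(svc *bigquery.Service, projectID, jobID string) (*bigquery.Job, error) {
	for {
		job, err := svc.Jobs.Get(projectID, jobID).Do()
		if err != nil {
			return nil, err
		}
		if job.Status != nil && job.Status.State == "DONE" {
			return job, nil
		}
		time.Sleep(2 * time.Second) // fixed delay keeps the sketch simple
	}
}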
func (r *JobsService) GetQueryResults(projectId string, jobId string) *JobsGetQueryResultsCall { c := &JobsGetQueryResultsCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.jobId = jobId return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of results to read func (c *JobsGetQueryResultsCall) MaxResults(maxResults int64) *JobsGetQueryResultsCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": Page token, // returned by a previous call, to request the next page of results func (c *JobsGetQueryResultsCall) PageToken(pageToken string) *JobsGetQueryResultsCall { c.opt_["pageToken"] = pageToken return c } // StartIndex sets the optional parameter "startIndex": Zero-based index // of the starting row func (c *JobsGetQueryResultsCall) StartIndex(startIndex uint64) *JobsGetQueryResultsCall { c.opt_["startIndex"] = startIndex return c } // TimeoutMs sets the optional parameter "timeoutMs": How long to wait // for the query to complete, in milliseconds, before returning. Default // is to return immediately. If the timeout passes before the job // completes, the request will fail with a TIMEOUT error func (c *JobsGetQueryResultsCall) TimeoutMs(timeoutMs int64) *JobsGetQueryResultsCall { c.opt_["timeoutMs"] = timeoutMs return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *JobsGetQueryResultsCall) Fields(s ...googleapi.Field) *JobsGetQueryResultsCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *JobsGetQueryResultsCall) Do() (*GetQueryResultsResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["startIndex"]; ok { params.Set("startIndex", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["timeoutMs"]; ok { params.Set("timeoutMs", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries/{jobId}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "jobId": c.jobId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *GetQueryResultsResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves the results of a query job.", // "httpMethod": "GET", // "id": "bigquery.jobs.getQueryResults", // "parameterOrder": [ // "projectId", // "jobId" // ], // "parameters": { // "jobId": { // "description": "Job ID of the query job", // "location": "path", // "required": true, // "type": "string" // }, // "maxResults": { // "description": "Maximum number of results to read", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "Page token, returned by a previous call, to request the next page of results", // "location": "query", // "type": "string" // }, // "projectId": { // "description": "Project ID of the query job", // "location": "path", // "required": true, // "type": "string" // }, // "startIndex": { // "description": "Zero-based index of the starting row", // "format": "uint64", // "location": "query", // "type": "string" // }, // "timeoutMs": { // "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error", // "format": "uint32", // "location": "query", // "type": "integer" // } // }, // "path": "projects/{projectId}/queries/{jobId}", // "response": { // "$ref": "GetQueryResultsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.jobs.insert": type JobsInsertCall struct { s *Service projectId string job *Job opt_ map[string]interface{} media_ io.Reader resumable_ googleapi.SizeReaderAt mediaType_ string ctx_ context.Context protocol_ string } // Insert: Starts a new asynchronous job. Requires the Can View project // role. func (r *JobsService) Insert(projectId string, job *Job) *JobsInsertCall { c := &JobsInsertCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.job = job return c } // Media specifies the media to upload in a single chunk. // At most one of Media and ResumableMedia may be set. func (c *JobsInsertCall) Media(r io.Reader) *JobsInsertCall { c.media_ = r c.protocol_ = "multipart" return c } // ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx. // At most one of Media and ResumableMedia may be set. // mediaType identifies the MIME media type of the upload, such as "image/png". // If mediaType is "", it will be auto-detected. func (c *JobsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *JobsInsertCall { c.ctx_ = ctx c.resumable_ = io.NewSectionReader(r, 0, size) c.mediaType_ = mediaType c.protocol_ = "resumable" return c } // ProgressUpdater provides a callback function that will be called after every chunk. // It should be a low-latency function in order to not slow down the upload operation. // This should only be called when using ResumableMedia (as opposed to Media). 
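// Illustrative sketch, not part of the generated source: draining every page of
// a query job's results with the GetQueryResults builder above. Assumes a
// *bigquery.Service (svc) as in the earlier sketches and that
// GetQueryResultsResponse carries JobComplete, PageToken and Rows (assumed
// field names; the struct is defined elsewhere in this file).
func allQueryRows(svc *bigquery.Service, projectID, jobID string) ([]*bigquery.TableRow, error) {
	var rows []*bigquery.TableRow
	token := ""
	for {
		call := svc.Jobs.GetQueryResults(projectID, jobID).
			MaxResults(500).
			TimeoutMs(10000) // wait up to 10s for the job before returning
		if token != "" {
			call = call.PageToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		if !resp.JobComplete {
			continue // job not finished yet; ask again with the same token
		}
		rows = append(rows, resp.Rows...)
		if resp.PageToken == "" {
			return rows, nil
		}
		token = resp.PageToken
	}
}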
func (c *JobsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *JobsInsertCall { c.opt_["progressUpdater"] = pu return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *JobsInsertCall) Fields(s ...googleapi.Field) *JobsInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *JobsInsertCall) Do() (*Job, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs") var progressUpdater_ googleapi.ProgressUpdater if v, ok := c.opt_["progressUpdater"]; ok { if pu, ok := v.(googleapi.ProgressUpdater); ok { progressUpdater_ = pu } } if c.media_ != nil || c.resumable_ != nil { urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) params.Set("uploadType", c.protocol_) } urls += "?" + params.Encode() if c.protocol_ != "resumable" { var cancel func() cancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype) if cancel != nil { defer cancel() } } req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) if c.protocol_ == "resumable" { req.ContentLength = 0 if c.mediaType_ == "" { c.mediaType_ = googleapi.DetectMediaType(c.resumable_) } req.Header.Set("X-Upload-Content-Type", c.mediaType_) req.Body = nil } else { req.Header.Set("Content-Type", ctype) } req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } if c.protocol_ == "resumable" { loc := res.Header.Get("Location") rx := &googleapi.ResumableUpload{ Client: c.s.client, UserAgent: c.s.userAgent(), URI: loc, Media: c.resumable_, MediaType: c.mediaType_, ContentLength: c.resumable_.Size(), Callback: progressUpdater_, } res, err = rx.Upload(c.ctx_) if err != nil { return nil, err } defer res.Body.Close() } var ret *Job if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Starts a new asynchronous job. 
Requires the Can View project role.", // "httpMethod": "POST", // "id": "bigquery.jobs.insert", // "mediaUpload": { // "accept": [ // "*/*" // ], // "protocols": { // "resumable": { // "multipart": true, // "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs" // }, // "simple": { // "multipart": true, // "path": "/upload/bigquery/v2/projects/{projectId}/jobs" // } // } // }, // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "Project ID of the project that will be billed for the job", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/jobs", // "request": { // "$ref": "Job" // }, // "response": { // "$ref": "Job" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaUpload": true // } } // method id "bigquery.jobs.list": type JobsListCall struct { s *Service projectId string opt_ map[string]interface{} } // List: Lists all jobs that you started in the specified project. The // job list returns in reverse chronological order of when the jobs were // created, starting with the most recent job created. Requires the Can // View project role, or the Is Owner project role if you set the // allUsers property. func (r *JobsService) List(projectId string) *JobsListCall { c := &JobsListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId return c } // AllUsers sets the optional parameter "allUsers": Whether to display // jobs owned by all users in the project. Default false func (c *JobsListCall) AllUsers(allUsers bool) *JobsListCall { c.opt_["allUsers"] = allUsers return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of results to return func (c *JobsListCall) MaxResults(maxResults int64) *JobsListCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": Page token, // returned by a previous call, to request the next page of results func (c *JobsListCall) PageToken(pageToken string) *JobsListCall { c.opt_["pageToken"] = pageToken return c } // Projection sets the optional parameter "projection": Restrict // information returned to a set of selected fields // // Possible values: // "full" - Includes all job data // "minimal" - Does not include the job configuration func (c *JobsListCall) Projection(projection string) *JobsListCall { c.opt_["projection"] = projection return c } // StateFilter sets the optional parameter "stateFilter": Filter for job // state // // Possible values: // "done" - Finished jobs // "pending" - Pending jobs // "running" - Running jobs func (c *JobsListCall) StateFilter(stateFilter string) *JobsListCall { c.opt_["stateFilter"] = stateFilter return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
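// Illustrative sketch, not part of the generated source: starting an
// asynchronous query job through the Jobs.Insert builder above, with no media
// upload, so neither Media nor ResumableMedia is set. The
// Job/JobConfiguration/JobConfigurationQuery field names are assumptions taken
// from the BigQuery v2 resource shapes, not from this excerpt.
func startQueryJob(svc *bigquery.Service, projectID, sql string) (*bigquery.Job, error) {
	job := &bigquery.Job{
		Configuration: &bigquery.JobConfiguration{
			Query: &bigquery.JobConfigurationQuery{Query: sql},
		},
	}
	return svc.Jobs.Insert(projectID, job).Do()
}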
func (c *JobsListCall) Fields(s ...googleapi.Field) *JobsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *JobsListCall) Do() (*JobList, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["allUsers"]; ok { params.Set("allUsers", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["stateFilter"]; ok { params.Set("stateFilter", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *JobList if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.", // "httpMethod": "GET", // "id": "bigquery.jobs.list", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "allUsers": { // "description": "Whether to display jobs owned by all users in the project. Default false", // "location": "query", // "type": "boolean" // }, // "maxResults": { // "description": "Maximum number of results to return", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "Page token, returned by a previous call, to request the next page of results", // "location": "query", // "type": "string" // }, // "projectId": { // "description": "Project ID of the jobs to list", // "location": "path", // "required": true, // "type": "string" // }, // "projection": { // "description": "Restrict information returned to a set of selected fields", // "enum": [ // "full", // "minimal" // ], // "enumDescriptions": [ // "Includes all job data", // "Does not include the job configuration" // ], // "location": "query", // "type": "string" // }, // "stateFilter": { // "description": "Filter for job state", // "enum": [ // "done", // "pending", // "running" // ], // "enumDescriptions": [ // "Finished jobs", // "Pending jobs", // "Running jobs" // ], // "location": "query", // "repeated": true, // "type": "string" // } // }, // "path": "projects/{projectId}/jobs", // "response": { // "$ref": "JobList" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.jobs.query": type JobsQueryCall struct { s *Service projectId string queryrequest *QueryRequest opt_ map[string]interface{} } // Query: Runs a BigQuery SQL query synchronously and returns query // results if the query completes within a specified timeout. 
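// Illustrative sketch, not part of the generated source: listing the most
// recent running jobs in a project with the Jobs.List builders above.
// "running" and "minimal" are the enum values documented for stateFilter and
// projection; svc is a *bigquery.Service as in the earlier sketches.
func listRunningJobs(svc *bigquery.Service, projectID string) (*bigquery.JobList, error) {
	return svc.Jobs.List(projectID).
		StateFilter("running").
		Projection("minimal").
		MaxResults(20).
		Do()
}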
func (r *JobsService) Query(projectId string, queryrequest *QueryRequest) *JobsQueryCall { c := &JobsQueryCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.queryrequest = queryrequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *JobsQueryCall) Fields(s ...googleapi.Field) *JobsQueryCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *JobsQueryCall) Do() (*QueryResponse, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.queryrequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *QueryResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.", // "httpMethod": "POST", // "id": "bigquery.jobs.query", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "Project ID of the project billed for the query", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/queries", // "request": { // "$ref": "QueryRequest" // }, // "response": { // "$ref": "QueryResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.projects.list": type ProjectsListCall struct { s *Service opt_ map[string]interface{} } // List: Lists all projects to which you have been granted any project // role. func (r *ProjectsService) List() *ProjectsListCall { c := &ProjectsListCall{s: r.s, opt_: make(map[string]interface{})} return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of results to return func (c *ProjectsListCall) MaxResults(maxResults int64) *ProjectsListCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": Page token, // returned by a previous call, to request the next page of results func (c *ProjectsListCall) PageToken(pageToken string) *ProjectsListCall { c.opt_["pageToken"] = pageToken return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
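// Illustrative sketch, not part of the generated source: running a query
// synchronously with Jobs.Query and dumping each row as JSON, which avoids
// depending on the TableRow layout. Assumes "fmt" and "encoding/json" imports
// and that QueryRequest has Query/TimeoutMs and QueryResponse has
// JobComplete/Rows (field names assumed from the v2 resources, not shown here).
func runQuery(svc *bigquery.Service, projectID, sql string) error {
	resp, err := svc.Jobs.Query(projectID, &bigquery.QueryRequest{
		Query:     sql,
		TimeoutMs: 10000, // give the job up to 10s before returning incomplete
	}).Do()
	if err != nil {
		return err
	}
	if !resp.JobComplete {
		return fmt.Errorf("query still running; poll Jobs.GetQueryResults for the rows")
	}
	for _, row := range resp.Rows {
		b, _ := json.Marshal(row)
		fmt.Println(string(b))
	}
	return nil
}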
func (c *ProjectsListCall) Fields(s ...googleapi.Field) *ProjectsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsListCall) Do() (*ProjectList, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.SetOpaque(req.URL) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ProjectList if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all projects to which you have been granted any project role.", // "httpMethod": "GET", // "id": "bigquery.projects.list", // "parameters": { // "maxResults": { // "description": "Maximum number of results to return", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "Page token, returned by a previous call, to request the next page of results", // "location": "query", // "type": "string" // } // }, // "path": "projects", // "response": { // "$ref": "ProjectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tabledata.insertAll": type TabledataInsertAllCall struct { s *Service projectId string datasetId string tableId string tabledatainsertallrequest *TableDataInsertAllRequest opt_ map[string]interface{} } // InsertAll: Streams data into BigQuery one record at a time without // needing to run a load job. Requires the WRITER dataset role. func (r *TabledataService) InsertAll(projectId string, datasetId string, tableId string, tabledatainsertallrequest *TableDataInsertAllRequest) *TabledataInsertAllCall { c := &TabledataInsertAllCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.tableId = tableId c.tabledatainsertallrequest = tabledatainsertallrequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *TabledataInsertAllCall) Fields(s ...googleapi.Field) *TabledataInsertAllCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TabledataInsertAllCall) Do() (*TableDataInsertAllResponse, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tabledatainsertallrequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, "tableId": c.tableId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *TableDataInsertAllResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.", // "httpMethod": "POST", // "id": "bigquery.tabledata.insertAll", // "parameterOrder": [ // "projectId", // "datasetId", // "tableId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the destination table.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the destination table.", // "location": "path", // "required": true, // "type": "string" // }, // "tableId": { // "description": "Table ID of the destination table.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll", // "request": { // "$ref": "TableDataInsertAllRequest" // }, // "response": { // "$ref": "TableDataInsertAllResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/bigquery.insertdata", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tabledata.list": type TabledataListCall struct { s *Service projectId string datasetId string tableId string opt_ map[string]interface{} } // List: Retrieves table data from a specified set of rows. Requires the // READER dataset role. func (r *TabledataService) List(projectId string, datasetId string, tableId string) *TabledataListCall { c := &TabledataListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.tableId = tableId return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of results to return func (c *TabledataListCall) MaxResults(maxResults int64) *TabledataListCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": Page token, // returned by a previous call, identifying the result set func (c *TabledataListCall) PageToken(pageToken string) *TabledataListCall { c.opt_["pageToken"] = pageToken return c } // StartIndex sets the optional parameter "startIndex": Zero-based index // of the starting row to read func (c *TabledataListCall) StartIndex(startIndex uint64) *TabledataListCall { c.opt_["startIndex"] = startIndex return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
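// Illustrative sketch, not part of the generated source: listing the projects
// visible to the authenticated account with the Projects.List builder defined
// above and printing the response as JSON, which sidesteps any assumption
// about the ProjectList struct layout. Assumes "fmt" and "encoding/json"
// imports and a *bigquery.Service as in the earlier sketches.
func listProjects(svc *bigquery.Service) error {
	resp, err := svc.Projects.List().MaxResults(50).Do()
	if err != nil {
		return err
	}
	b, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(b))
	return nil
}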
func (c *TabledataListCall) Fields(s ...googleapi.Field) *TabledataListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TabledataListCall) Do() (*TableDataList, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["startIndex"]; ok { params.Set("startIndex", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, "tableId": c.tableId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *TableDataList if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves table data from a specified set of rows. Requires the READER dataset role.", // "httpMethod": "GET", // "id": "bigquery.tabledata.list", // "parameterOrder": [ // "projectId", // "datasetId", // "tableId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the table to read", // "location": "path", // "required": true, // "type": "string" // }, // "maxResults": { // "description": "Maximum number of results to return", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "Page token, returned by a previous call, identifying the result set", // "location": "query", // "type": "string" // }, // "projectId": { // "description": "Project ID of the table to read", // "location": "path", // "required": true, // "type": "string" // }, // "startIndex": { // "description": "Zero-based index of the starting row to read", // "format": "uint64", // "location": "query", // "type": "string" // }, // "tableId": { // "description": "Table ID of the table to read", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data", // "response": { // "$ref": "TableDataList" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tables.delete": type TablesDeleteCall struct { s *Service projectId string datasetId string tableId string opt_ map[string]interface{} } // Delete: Deletes the table specified by tableId from the dataset. If // the table contains data, all the data will be deleted. func (r *TablesService) Delete(projectId string, datasetId string, tableId string) *TablesDeleteCall { c := &TablesDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.tableId = tableId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
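// Illustrative sketch, not part of the generated source: reading a table page
// by page with the Tabledata.List builders above. Assumes TableDataList
// exposes Rows and PageToken (field names assumed from the v2 resource; the
// struct is defined elsewhere in this file).
func readTableRows(svc *bigquery.Service, projectID, datasetID, tableID string) ([]*bigquery.TableRow, error) {
	var rows []*bigquery.TableRow
	token := ""
	for {
		call := svc.Tabledata.List(projectID, datasetID, tableID).MaxResults(1000)
		if token != "" {
			call = call.PageToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		rows = append(rows, resp.Rows...)
		if resp.PageToken == "" {
			return rows, nil
		}
		token = resp.PageToken
	}
}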
func (c *TablesDeleteCall) Fields(s ...googleapi.Field) *TablesDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TablesDeleteCall) Do() error { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") urls += "?" + params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, "tableId": c.tableId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.", // "httpMethod": "DELETE", // "id": "bigquery.tables.delete", // "parameterOrder": [ // "projectId", // "datasetId", // "tableId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the table to delete", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the table to delete", // "location": "path", // "required": true, // "type": "string" // }, // "tableId": { // "description": "Table ID of the table to delete", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tables.get": type TablesGetCall struct { s *Service projectId string datasetId string tableId string opt_ map[string]interface{} } // Get: Gets the specified table resource by table ID. This method does // not return the data in the table, it only returns the table resource, // which describes the structure of this table. func (r *TablesService) Get(projectId string, datasetId string, tableId string) *TablesGetCall { c := &TablesGetCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.tableId = tableId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *TablesGetCall) Fields(s ...googleapi.Field) *TablesGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TablesGetCall) Do() (*Table, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, "tableId": c.tableId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Table if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Gets the specified table resource by table ID. 
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.", // "httpMethod": "GET", // "id": "bigquery.tables.get", // "parameterOrder": [ // "projectId", // "datasetId", // "tableId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the requested table", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the requested table", // "location": "path", // "required": true, // "type": "string" // }, // "tableId": { // "description": "Table ID of the requested table", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", // "response": { // "$ref": "Table" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tables.insert": type TablesInsertCall struct { s *Service projectId string datasetId string table *Table opt_ map[string]interface{} } // Insert: Creates a new, empty table in the dataset. func (r *TablesService) Insert(projectId string, datasetId string, table *Table) *TablesInsertCall { c := &TablesInsertCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.table = table return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *TablesInsertCall) Fields(s ...googleapi.Field) *TablesInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TablesInsertCall) Do() (*Table, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Table if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new, empty table in the dataset.", // "httpMethod": "POST", // "id": "bigquery.tables.insert", // "parameterOrder": [ // "projectId", // "datasetId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the new table", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the new table", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables", // "request": { // "$ref": "Table" // }, // "response": { // "$ref": "Table" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tables.list": type TablesListCall struct { s *Service projectId string datasetId string opt_ map[string]interface{} } // List: Lists all tables in the specified dataset. Requires the READER // dataset role. func (r *TablesService) List(projectId string, datasetId string) *TablesListCall { c := &TablesListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of results to return func (c *TablesListCall) MaxResults(maxResults int64) *TablesListCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": Page token, // returned by a previous call, to request the next page of results func (c *TablesListCall) PageToken(pageToken string) *TablesListCall { c.opt_["pageToken"] = pageToken return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *TablesListCall) Fields(s ...googleapi.Field) *TablesListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TablesListCall) Do() (*TableList, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *TableList if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all tables in the specified dataset. 
Requires the READER dataset role.", // "httpMethod": "GET", // "id": "bigquery.tables.list", // "parameterOrder": [ // "projectId", // "datasetId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the tables to list", // "location": "path", // "required": true, // "type": "string" // }, // "maxResults": { // "description": "Maximum number of results to return", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "Page token, returned by a previous call, to request the next page of results", // "location": "query", // "type": "string" // }, // "projectId": { // "description": "Project ID of the tables to list", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables", // "response": { // "$ref": "TableList" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tables.patch": type TablesPatchCall struct { s *Service projectId string datasetId string tableId string table *Table opt_ map[string]interface{} } // Patch: Updates information in an existing table. The update method // replaces the entire table resource, whereas the patch method only // replaces fields that are provided in the submitted table resource. // This method supports patch semantics. func (r *TablesService) Patch(projectId string, datasetId string, tableId string, table *Table) *TablesPatchCall { c := &TablesPatchCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.tableId = tableId c.table = table return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *TablesPatchCall) Fields(s ...googleapi.Field) *TablesPatchCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TablesPatchCall) Do() (*Table, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") urls += "?" + params.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, "tableId": c.tableId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Table if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. 
This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "bigquery.tables.patch", // "parameterOrder": [ // "projectId", // "datasetId", // "tableId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the table to update", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the table to update", // "location": "path", // "required": true, // "type": "string" // }, // "tableId": { // "description": "Table ID of the table to update", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", // "request": { // "$ref": "Table" // }, // "response": { // "$ref": "Table" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "bigquery.tables.update": type TablesUpdateCall struct { s *Service projectId string datasetId string tableId string table *Table opt_ map[string]interface{} } // Update: Updates information in an existing table. The update method // replaces the entire table resource, whereas the patch method only // replaces fields that are provided in the submitted table resource. func (r *TablesService) Update(projectId string, datasetId string, tableId string, table *Table) *TablesUpdateCall { c := &TablesUpdateCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.datasetId = datasetId c.tableId = tableId c.table = table return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *TablesUpdateCall) Fields(s ...googleapi.Field) *TablesUpdateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *TablesUpdateCall) Do() (*Table, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") urls += "?" + params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "datasetId": c.datasetId, "tableId": c.tableId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Table if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates information in an existing table. 
The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.", // "httpMethod": "PUT", // "id": "bigquery.tables.update", // "parameterOrder": [ // "projectId", // "datasetId", // "tableId" // ], // "parameters": { // "datasetId": { // "description": "Dataset ID of the table to update", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "Project ID of the table to update", // "location": "path", // "required": true, // "type": "string" // }, // "tableId": { // "description": "Table ID of the table to update", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", // "request": { // "$ref": "Table" // }, // "response": { // "$ref": "Table" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } ================================================ FILE: vendor/google.golang.org/api/container/v1beta1/container-api.json ================================================ { "kind": "discovery#restDescription", "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/ReRXGEgk9TcyLgT1qFhzuzuEb7E\"", "discoveryVersion": "v1", "id": "container:v1beta1", "name": "container", "version": "v1beta1", "revision": "20150504", "title": "Google Container Engine API", "description": "The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.", "ownerDomain": "google.com", "ownerName": "Google", "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", "x32": "http://www.google.com/images/icons/product/search-32.gif" }, "documentationLink": "https://cloud.google.com/container-engine/docs/v1beta1/", "protocol": "rest", "baseUrl": "https://www.googleapis.com/container/v1beta1/projects/", "basePath": "/container/v1beta1/projects/", "rootUrl": "https://www.googleapis.com/", "servicePath": "container/v1beta1/projects/", "batchPath": "batch", "parameters": { "alt": { "type": "string", "description": "Data format for the response.", "default": "json", "enum": [ "json" ], "enumDescriptions": [ "Responses with Content-Type of application/json" ], "location": "query" }, "fields": { "type": "string", "description": "Selector specifying which fields to include in a partial response.", "location": "query" }, "key": { "type": "string", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "location": "query" }, "oauth_token": { "type": "string", "description": "OAuth 2.0 token for the current user.", "location": "query" }, "prettyPrint": { "type": "boolean", "description": "Returns response with indentations and line breaks.", "default": "true", "location": "query" }, "quotaUser": { "type": "string", "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", "location": "query" }, "userIp": { "type": "string", "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", "location": "query" } }, "auth": { "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { "description": "View and manage your data across Google Cloud Platform services" } } } }, "schemas": { "Cluster": { "id": "Cluster", "type": "object", "properties": { "clusterApiVersion": { "type": "string", "description": "The API version of the Kubernetes master and kubelets running in this cluster. Leave blank to pick up the latest stable release, or specify a version of the form \"x.y.z\". The Google Container Engine release notes lists the currently supported versions. If an incorrect version is specified, the server returns an error listing the currently supported versions." }, "containerIpv4Cidr": { "type": "string", "description": "The IP address range of the container pods in this cluster, in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block in 10.0.0.0/8 or 172.16.0.0/12." }, "creationTimestamp": { "type": "string", "description": "[Output only] The time the cluster was created, in RFC3339 text format." }, "description": { "type": "string", "description": "An optional description of this cluster." }, "enableCloudLogging": { "type": "boolean", "description": "Whether logs from the cluster should be made available via the Google Cloud Logging service. This includes both logs from your applications running in the cluster as well as logs from the Kubernetes components themselves." }, "endpoint": { "type": "string", "description": "[Output only] The IP address of this cluster's Kubernetes master. The endpoint can be accessed from the internet at https://username:password@endpoint/.\n\nSee the masterAuth property of this resource for username and password information." }, "masterAuth": { "$ref": "MasterAuth", "description": "The authentication information for accessing the master." }, "name": { "type": "string", "description": "The name of this cluster. The name must be unique within this project and zone, and can be up to 40 characters with the following restrictions: \n- Lowercase letters, numbers, and hyphens only.\n- Must start with a letter.\n- Must end with a number or a letter." }, "network": { "type": "string", "description": "The name of the Google Compute Engine network to which the cluster is connected." }, "nodeConfig": { "$ref": "NodeConfig", "description": "The machine type and image to use for all nodes in this cluster. See the descriptions of the child properties of nodeConfig." }, "nodeRoutingPrefixSize": { "type": "integer", "description": "[Output only] The size of the address space on each node for hosting containers.", "format": "int32" }, "numNodes": { "type": "integer", "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine resource quota is sufficient for this number of instances plus one (to include the master). You must also have available firewall and routes quota.", "format": "int32" }, "selfLink": { "type": "string", "description": "[Output only] Server-defined URL for the resource." }, "servicesIpv4Cidr": { "type": "string", "description": "[Output only] The IP address range of the Kubernetes services in this cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from the container CIDR." 
}, "status": { "type": "string", "description": "[Output only] The current status of this cluster.", "enum": [ "error", "provisioning", "running", "stopping" ], "enumDescriptions": [ "", "", "", "" ] }, "statusMessage": { "type": "string", "description": "[Output only] Additional information about the current status of this cluster, if available." }, "zone": { "type": "string", "description": "[Output only] The name of the Google Compute Engine zone in which the cluster resides." } } }, "CreateClusterRequest": { "id": "CreateClusterRequest", "type": "object", "properties": { "cluster": { "$ref": "Cluster", "description": "A cluster resource." } } }, "ListAggregatedClustersResponse": { "id": "ListAggregatedClustersResponse", "type": "object", "properties": { "clusters": { "type": "array", "description": "A list of clusters in the project, across all zones.", "items": { "$ref": "Cluster" } } } }, "ListAggregatedOperationsResponse": { "id": "ListAggregatedOperationsResponse", "type": "object", "properties": { "operations": { "type": "array", "description": "A list of operations in the project, across all zones.", "items": { "$ref": "Operation" } } } }, "ListClustersResponse": { "id": "ListClustersResponse", "type": "object", "properties": { "clusters": { "type": "array", "description": "A list of clusters in the project in the specified zone.", "items": { "$ref": "Cluster" } } } }, "ListOperationsResponse": { "id": "ListOperationsResponse", "type": "object", "properties": { "operations": { "type": "array", "description": "A list of operations in the project in the specified zone.", "items": { "$ref": "Operation" } } } }, "MasterAuth": { "id": "MasterAuth", "type": "object", "description": "The authentication information for accessing the master. Authentication is either done using HTTP basic authentication or using a bearer token.", "properties": { "bearerToken": { "type": "string", "description": "The token used to authenticate API requests to the master. The token is to be included in an HTTP Authorization Header in all requests to the master endpoint. The format of the header is: \"Authorization: Bearer \"." }, "password": { "type": "string", "description": "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint. Because the master endpoint is open to the internet, you should create a strong password." }, "user": { "type": "string", "description": "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint." } } }, "NodeConfig": { "id": "NodeConfig", "type": "object", "properties": { "machineType": { "type": "string", "description": "The name of a Google Compute Engine machine type (e.g. n1-standard-1).\n\nIf unspecified, the default machine type is n1-standard-1." }, "serviceAccounts": { "type": "array", "description": "The optional list of ServiceAccounts, each with their specified scopes, to be made available on all of the node VMs. In addition to the service accounts and scopes specified, the \"default\" account will always be created with the following scopes to ensure the correct functioning of the cluster: \n- https://www.googleapis.com/auth/compute,\n- https://www.googleapis.com/auth/devstorage.read_only", "items": { "$ref": "ServiceAccount" } }, "sourceImage": { "type": "string", "description": "The fully-specified name of a Google Compute Engine image. 
For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version date).\n\nIf specifying an image, you are responsible for ensuring its compatibility with the Debian 7 backports image. We recommend leaving this field blank to accept the default backports-debian-7-wheezy value." } } }, "Operation": { "id": "Operation", "type": "object", "description": "Defines the operation resource. All fields are output only.", "properties": { "errorMessage": { "type": "string", "description": "If an error has occurred, a textual description of the error." }, "name": { "type": "string", "description": "The server-assigned ID for the operation." }, "operationType": { "type": "string", "description": "The operation type.", "enum": [ "createCluster", "deleteCluster" ], "enumDescriptions": [ "", "" ] }, "selfLink": { "type": "string", "description": "Server-defined URL for the resource." }, "status": { "type": "string", "description": "The current status of the operation.", "enum": [ "done", "pending", "running" ], "enumDescriptions": [ "", "", "" ] }, "target": { "type": "string", "description": "[Optional] The URL of the cluster resource that this operation is associated with." }, "targetLink": { "type": "string", "description": "Server-defined URL for the target of the operation." }, "zone": { "type": "string", "description": "The name of the Google Compute Engine zone in which the operation is taking place." } } }, "ServiceAccount": { "id": "ServiceAccount", "type": "object", "description": "A Compute Engine service account.", "properties": { "email": { "type": "string", "description": "Email address of the service account." }, "scopes": { "type": "array", "description": "The list of scopes to be made available for this service account.", "items": { "type": "string" } } } } }, "resources": { "projects": { "resources": { "clusters": { "methods": { "list": { "id": "container.projects.clusters.list", "path": "{projectId}/clusters", "httpMethod": "GET", "description": "Lists all clusters owned by a project across all zones.", "parameters": { "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId" ], "response": { "$ref": "ListAggregatedClustersResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } }, "operations": { "methods": { "list": { "id": "container.projects.operations.list", "path": "{projectId}/operations", "httpMethod": "GET", "description": "Lists all operations in a project, across all zones.", "parameters": { "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId" ], "response": { "$ref": "ListAggregatedOperationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } }, "zones": { "resources": { "clusters": { "methods": { "create": { "id": "container.projects.zones.clusters.create", "path": "{projectId}/zones/{zoneId}/clusters", "httpMethod": "POST", "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. 
A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.", "parameters": { "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" }, "zoneId": { "type": "string", "description": "The name of the Google Compute Engine zone in which the cluster resides.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "zoneId" ], "request": { "$ref": "CreateClusterRequest" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { "id": "container.projects.zones.clusters.delete", "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}", "httpMethod": "DELETE", "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.", "parameters": { "clusterId": { "type": "string", "description": "The name of the cluster to delete.", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" }, "zoneId": { "type": "string", "description": "The name of the Google Compute Engine zone in which the cluster resides.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "zoneId", "clusterId" ], "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "get": { "id": "container.projects.zones.clusters.get", "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}", "httpMethod": "GET", "description": "Gets a specific cluster.", "parameters": { "clusterId": { "type": "string", "description": "The name of the cluster to retrieve.", "required": true, "location": "path" }, "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" }, "zoneId": { "type": "string", "description": "The name of the Google Compute Engine zone in which the cluster resides.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "zoneId", "clusterId" ], "response": { "$ref": "Cluster" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { "id": "container.projects.zones.clusters.list", "path": "{projectId}/zones/{zoneId}/clusters", "httpMethod": "GET", "description": "Lists all clusters owned by a project in the specified zone.", "parameters": { "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" }, "zoneId": { "type": "string", "description": "The name of the Google Compute Engine zone in which the cluster resides.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "zoneId" ], "response": { "$ref": "ListClustersResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } }, "operations": { "methods": { "get": { "id": "container.projects.zones.operations.get", "path": "{projectId}/zones/{zoneId}/operations/{operationId}", "httpMethod": "GET", "description": "Gets the specified operation.", "parameters": { "operationId": { "type": "string", "description": "The server-assigned name of the operation.", 
"required": true, "location": "path" }, "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" }, "zoneId": { "type": "string", "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "zoneId", "operationId" ], "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { "id": "container.projects.zones.operations.list", "path": "{projectId}/zones/{zoneId}/operations", "httpMethod": "GET", "description": "Lists all operations in a project in a specific zone.", "parameters": { "projectId": { "type": "string", "description": "The Google Developers Console project ID or project number.", "required": true, "location": "path" }, "zoneId": { "type": "string", "description": "The name of the Google Compute Engine zone to return operations for.", "required": true, "location": "path" } }, "parameterOrder": [ "projectId", "zoneId" ], "response": { "$ref": "ListOperationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } } } } } } } } ================================================ FILE: vendor/google.golang.org/api/container/v1beta1/container-gen.go ================================================ // Package container provides access to the Google Container Engine API. // // See https://cloud.google.com/container-engine/docs/v1beta1/ // // Usage example: // // import "google.golang.org/api/container/v1beta1" // ... // containerService, err := container.New(oauthHttpClient) package container import ( "bytes" "encoding/json" "errors" "fmt" "golang.org/x/net/context" "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Background const apiId = "container:v1beta1" const apiName = "container" const apiVersion = "v1beta1" const basePath = "https://www.googleapis.com/container/v1beta1/projects/" // OAuth2 scopes used by this API. 
const ( // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" ) func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Projects = NewProjectsService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Projects *ProjectsService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Clusters = NewProjectsClustersService(s) rs.Operations = NewProjectsOperationsService(s) rs.Zones = NewProjectsZonesService(s) return rs } type ProjectsService struct { s *Service Clusters *ProjectsClustersService Operations *ProjectsOperationsService Zones *ProjectsZonesService } func NewProjectsClustersService(s *Service) *ProjectsClustersService { rs := &ProjectsClustersService{s: s} return rs } type ProjectsClustersService struct { s *Service } func NewProjectsOperationsService(s *Service) *ProjectsOperationsService { rs := &ProjectsOperationsService{s: s} return rs } type ProjectsOperationsService struct { s *Service } func NewProjectsZonesService(s *Service) *ProjectsZonesService { rs := &ProjectsZonesService{s: s} rs.Clusters = NewProjectsZonesClustersService(s) rs.Operations = NewProjectsZonesOperationsService(s) return rs } type ProjectsZonesService struct { s *Service Clusters *ProjectsZonesClustersService Operations *ProjectsZonesOperationsService } func NewProjectsZonesClustersService(s *Service) *ProjectsZonesClustersService { rs := &ProjectsZonesClustersService{s: s} return rs } type ProjectsZonesClustersService struct { s *Service } func NewProjectsZonesOperationsService(s *Service) *ProjectsZonesOperationsService { rs := &ProjectsZonesOperationsService{s: s} return rs } type ProjectsZonesOperationsService struct { s *Service } type Cluster struct { // ClusterApiVersion: The API version of the Kubernetes master and // kubelets running in this cluster. Leave blank to pick up the latest // stable release, or specify a version of the form "x.y.z". The Google // Container Engine release notes lists the currently supported // versions. If an incorrect version is specified, the server returns an // error listing the currently supported versions. ClusterApiVersion string `json:"clusterApiVersion,omitempty"` // ContainerIpv4Cidr: The IP address range of the container pods in this // cluster, in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have // one automatically chosen or specify a /14 block in 10.0.0.0/8 or // 172.16.0.0/12. ContainerIpv4Cidr string `json:"containerIpv4Cidr,omitempty"` // CreationTimestamp: [Output only] The time the cluster was created, in // RFC3339 text format. CreationTimestamp string `json:"creationTimestamp,omitempty"` // Description: An optional description of this cluster. Description string `json:"description,omitempty"` // EnableCloudLogging: Whether logs from the cluster should be made // available via the Google Cloud Logging service. This includes both // logs from your applications running in the cluster as well as logs // from the Kubernetes components themselves. 
EnableCloudLogging bool `json:"enableCloudLogging,omitempty"` // Endpoint: [Output only] The IP address of this cluster's Kubernetes // master. The endpoint can be accessed from the internet at // https://username:password@endpoint/. // // See the masterAuth property of this resource for username and // password information. Endpoint string `json:"endpoint,omitempty"` // MasterAuth: The authentication information for accessing the master. MasterAuth *MasterAuth `json:"masterAuth,omitempty"` // Name: The name of this cluster. The name must be unique within this // project and zone, and can be up to 40 characters with the following // restrictions: // - Lowercase letters, numbers, and hyphens only. // - Must start with a letter. // - Must end with a number or a letter. Name string `json:"name,omitempty"` // Network: The name of the Google Compute Engine network to which the // cluster is connected. Network string `json:"network,omitempty"` // NodeConfig: The machine type and image to use for all nodes in this // cluster. See the descriptions of the child properties of nodeConfig. NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` // NodeRoutingPrefixSize: [Output only] The size of the address space on // each node for hosting containers. NodeRoutingPrefixSize int64 `json:"nodeRoutingPrefixSize,omitempty"` // NumNodes: The number of nodes to create in this cluster. You must // ensure that your Compute Engine resource quota is sufficient for this // number of instances plus one (to include the master). You must also // have available firewall and routes quota. NumNodes int64 `json:"numNodes,omitempty"` // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` // ServicesIpv4Cidr: [Output only] The IP address range of the // Kubernetes services in this cluster, in CIDR notation (e.g. // 1.2.3.4/29). Service addresses are typically put in the last /16 from // the container CIDR. ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` // Status: [Output only] The current status of this cluster. // // Possible values: // "error" // "provisioning" // "running" // "stopping" Status string `json:"status,omitempty"` // StatusMessage: [Output only] Additional information about the current // status of this cluster, if available. StatusMessage string `json:"statusMessage,omitempty"` // Zone: [Output only] The name of the Google Compute Engine zone in // which the cluster resides. Zone string `json:"zone,omitempty"` } type CreateClusterRequest struct { // Cluster: A cluster resource. Cluster *Cluster `json:"cluster,omitempty"` } type ListAggregatedClustersResponse struct { // Clusters: A list of clusters in the project, across all zones. Clusters []*Cluster `json:"clusters,omitempty"` } type ListAggregatedOperationsResponse struct { // Operations: A list of operations in the project, across all zones. Operations []*Operation `json:"operations,omitempty"` } type ListClustersResponse struct { // Clusters: A list of clusters in the project in the specified zone. Clusters []*Cluster `json:"clusters,omitempty"` } type ListOperationsResponse struct { // Operations: A list of operations in the project in the specified // zone. Operations []*Operation `json:"operations,omitempty"` } type MasterAuth struct { // BearerToken: The token used to authenticate API requests to the // master. The token is to be included in an HTTP Authorization Header // in all requests to the master endpoint. The format of the header is: // "Authorization: Bearer ". 
BearerToken string `json:"bearerToken,omitempty"` // Password: The password to use for HTTP basic authentication when // accessing the Kubernetes master endpoint. Because the master endpoint // is open to the internet, you should create a strong password. Password string `json:"password,omitempty"` // User: The username to use for HTTP basic authentication when // accessing the Kubernetes master endpoint. User string `json:"user,omitempty"` } type NodeConfig struct { // MachineType: The name of a Google Compute Engine machine type (e.g. // n1-standard-1). // // If unspecified, the default machine type is n1-standard-1. MachineType string `json:"machineType,omitempty"` // ServiceAccounts: The optional list of ServiceAccounts, each with // their specified scopes, to be made available on all of the node VMs. // In addition to the service accounts and scopes specified, the // "default" account will always be created with the following scopes to // ensure the correct functioning of the cluster: // - https://www.googleapis.com/auth/compute, // - https://www.googleapis.com/auth/devstorage.read_only ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` // SourceImage: The fully-specified name of a Google Compute Engine // image. For example: // https://www.googleapis.com/compute/v1/projects/debian-cloud/global/ima // ges/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version // date). // // If specifying an image, you are responsible for ensuring its // compatibility with the Debian 7 backports image. We recommend leaving // this field blank to accept the default backports-debian-7-wheezy // value. SourceImage string `json:"sourceImage,omitempty"` } type Operation struct { // ErrorMessage: If an error has occurred, a textual description of the // error. ErrorMessage string `json:"errorMessage,omitempty"` // Name: The server-assigned ID for the operation. Name string `json:"name,omitempty"` // OperationType: The operation type. // // Possible values: // "createCluster" // "deleteCluster" OperationType string `json:"operationType,omitempty"` // SelfLink: Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` // Status: The current status of the operation. // // Possible values: // "done" // "pending" // "running" Status string `json:"status,omitempty"` // Target: [Optional] The URL of the cluster resource that this // operation is associated with. Target string `json:"target,omitempty"` // TargetLink: Server-defined URL for the target of the operation. TargetLink string `json:"targetLink,omitempty"` // Zone: The name of the Google Compute Engine zone in which the // operation is taking place. Zone string `json:"zone,omitempty"` } type ServiceAccount struct { // Email: Email address of the service account. Email string `json:"email,omitempty"` // Scopes: The list of scopes to be made available for this service // account. Scopes []string `json:"scopes,omitempty"` } // method id "container.projects.clusters.list": type ProjectsClustersListCall struct { s *Service projectId string opt_ map[string]interface{} } // List: Lists all clusters owned by a project across all zones. func (r *ProjectsClustersService) List(projectId string) *ProjectsClustersListCall { c := &ProjectsClustersListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ProjectsClustersListCall) Fields(s ...googleapi.Field) *ProjectsClustersListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsClustersListCall) Do() (*ListAggregatedClustersResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/clusters") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ListAggregatedClustersResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all clusters owned by a project across all zones.", // "httpMethod": "GET", // "id": "container.projects.clusters.list", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/clusters", // "response": { // "$ref": "ListAggregatedClustersResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "container.projects.operations.list": type ProjectsOperationsListCall struct { s *Service projectId string opt_ map[string]interface{} } // List: Lists all operations in a project, across all zones. func (r *ProjectsOperationsService) List(projectId string) *ProjectsOperationsListCall { c := &ProjectsOperationsListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsOperationsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsOperationsListCall) Do() (*ListAggregatedOperationsResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/operations") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ListAggregatedOperationsResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all operations in a project, across all zones.", // "httpMethod": "GET", // "id": "container.projects.operations.list", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/operations", // "response": { // "$ref": "ListAggregatedOperationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "container.projects.zones.clusters.create": type ProjectsZonesClustersCreateCall struct { s *Service projectId string zoneId string createclusterrequest *CreateClusterRequest opt_ map[string]interface{} } // Create: Creates a cluster, consisting of the specified number and // type of Google Compute Engine instances, plus a Kubernetes master // instance. // // The cluster is created in the project's default network. // // A firewall is added that allows traffic into port 443 on the master, // which enables HTTPS. A firewall and a route is added for each node to // allow the containers on that node to communicate with all other // instances in the cluster. // // Finally, an entry is added to the project's global metadata // indicating which CIDR range is being used by the cluster. func (r *ProjectsZonesClustersService) Create(projectId string, zoneId string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall { c := &ProjectsZonesClustersCreateCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.zoneId = zoneId c.createclusterrequest = createclusterrequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsZonesClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersCreateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsZonesClustersCreateCall) Do() (*Operation, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "zoneId": c.zoneId, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Operation if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.create", // "parameterOrder": [ // "projectId", // "zoneId" // ], // "parameters": { // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // }, // "zoneId": { // "description": "The name of the Google Compute Engine zone in which the cluster resides.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/zones/{zoneId}/clusters", // "request": { // "$ref": "CreateClusterRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "container.projects.zones.clusters.delete": type ProjectsZonesClustersDeleteCall struct { s *Service projectId string zoneId string clusterId string opt_ map[string]interface{} } // Delete: Deletes the cluster, including the Kubernetes master and all // worker nodes. // // Firewalls and routes that were configured at cluster creation are // also deleted. func (r *ProjectsZonesClustersService) Delete(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersDeleteCall { c := &ProjectsZonesClustersDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.zoneId = zoneId c.clusterId = clusterId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsZonesClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsZonesClustersDeleteCall) Do() (*Operation, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "zoneId": c.zoneId, "clusterId": c.clusterId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Operation if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.", // "httpMethod": "DELETE", // "id": "container.projects.zones.clusters.delete", // "parameterOrder": [ // "projectId", // "zoneId", // "clusterId" // ], // "parameters": { // "clusterId": { // "description": "The name of the cluster to delete.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // }, // "zoneId": { // "description": "The name of the Google Compute Engine zone in which the cluster resides.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "container.projects.zones.clusters.get": type ProjectsZonesClustersGetCall struct { s *Service projectId string zoneId string clusterId string opt_ map[string]interface{} } // Get: Gets a specific cluster. func (r *ProjectsZonesClustersService) Get(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersGetCall { c := &ProjectsZonesClustersGetCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.zoneId = zoneId c.clusterId = clusterId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsZonesClustersGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsZonesClustersGetCall) Do() (*Cluster, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "zoneId": c.zoneId, "clusterId": c.clusterId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Cluster if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Gets a specific cluster.", // "httpMethod": "GET", // "id": "container.projects.zones.clusters.get", // "parameterOrder": [ // "projectId", // "zoneId", // "clusterId" // ], // "parameters": { // "clusterId": { // "description": "The name of the cluster to retrieve.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // }, // "zoneId": { // "description": "The name of the Google Compute Engine zone in which the cluster resides.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}", // "response": { // "$ref": "Cluster" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "container.projects.zones.clusters.list": type ProjectsZonesClustersListCall struct { s *Service projectId string zoneId string opt_ map[string]interface{} } // List: Lists all clusters owned by a project in the specified zone. func (r *ProjectsZonesClustersService) List(projectId string, zoneId string) *ProjectsZonesClustersListCall { c := &ProjectsZonesClustersListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.zoneId = zoneId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsZonesClustersListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsZonesClustersListCall) Do() (*ListClustersResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "zoneId": c.zoneId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ListClustersResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all clusters owned by a project in the specified zone.", // "httpMethod": "GET", // "id": "container.projects.zones.clusters.list", // "parameterOrder": [ // "projectId", // "zoneId" // ], // "parameters": { // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // }, // "zoneId": { // "description": "The name of the Google Compute Engine zone in which the cluster resides.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/zones/{zoneId}/clusters", // "response": { // "$ref": "ListClustersResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "container.projects.zones.operations.get": type ProjectsZonesOperationsGetCall struct { s *Service projectId string zoneId string operationId string opt_ map[string]interface{} } // Get: Gets the specified operation. func (r *ProjectsZonesOperationsService) Get(projectId string, zoneId string, operationId string) *ProjectsZonesOperationsGetCall { c := &ProjectsZonesOperationsGetCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.zoneId = zoneId c.operationId = operationId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsZonesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsZonesOperationsGetCall) Do() (*Operation, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations/{operationId}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "zoneId": c.zoneId, "operationId": c.operationId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Operation if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Gets the specified operation.", // "httpMethod": "GET", // "id": "container.projects.zones.operations.get", // "parameterOrder": [ // "projectId", // "zoneId", // "operationId" // ], // "parameters": { // "operationId": { // "description": "The server-assigned name of the operation.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // }, // "zoneId": { // "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/zones/{zoneId}/operations/{operationId}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "container.projects.zones.operations.list": type ProjectsZonesOperationsListCall struct { s *Service projectId string zoneId string opt_ map[string]interface{} } // List: Lists all operations in a project in a specific zone. func (r *ProjectsZonesOperationsService) List(projectId string, zoneId string) *ProjectsZonesOperationsListCall { c := &ProjectsZonesOperationsListCall{s: r.s, opt_: make(map[string]interface{})} c.projectId = projectId c.zoneId = zoneId return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsZonesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsZonesOperationsListCall) Do() (*ListOperationsResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, "zoneId": c.zoneId, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ListOperationsResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists all operations in a project in a specific zone.", // "httpMethod": "GET", // "id": "container.projects.zones.operations.list", // "parameterOrder": [ // "projectId", // "zoneId" // ], // "parameters": { // "projectId": { // "description": "The Google Developers Console project ID or project number.", // "location": "path", // "required": true, // "type": "string" // }, // "zoneId": { // "description": "The name of the Google Compute Engine zone to return operations for.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{projectId}/zones/{zoneId}/operations", // "response": { // "$ref": "ListOperationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } ================================================ FILE: vendor/google.golang.org/api/googleapi/googleapi.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package googleapi contains the common code shared by all Google API // libraries. package googleapi import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "mime/multipart" "net/http" "net/textproto" "net/url" "regexp" "strconv" "strings" "sync" "time" "golang.org/x/net/context" "google.golang.org/api/googleapi/internal/uritemplates" ) // ContentTyper is an interface for Readers which know (or would like // to override) their Content-Type. If a media body doesn't implement // ContentTyper, the type is sniffed from the content using // http.DetectContentType. type ContentTyper interface { ContentType() string } // A SizeReaderAt is a ReaderAt with a Size method. // An io.SectionReader implements SizeReaderAt. type SizeReaderAt interface { io.ReaderAt Size() int64 } const ( Version = "0.5" // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete. statusResumeIncomplete = 308 // UserAgent is the header string used to identify this package. UserAgent = "google-api-go-client/" + Version // uploadPause determines the delay between failed upload attempts uploadPause = 1 * time.Second ) // Error contains an error response from the server. type Error struct { // Code is the HTTP response status code and will always be populated. Code int `json:"code"` // Message is the server response message and is only populated when // explicitly referenced by the JSON server response. Message string `json:"message"` // Body is the raw response returned by the server. // It is often but not always JSON, depending on how the request fails. Body string Errors []ErrorItem } // ErrorItem is a detailed error code & message from the Google API frontend. type ErrorItem struct { // Reason is the typed error code. For example: "some_example". Reason string `json:"reason"` // Message is the human-readable description of the error. 
Message string `json:"message"` } func (e *Error) Error() string { if len(e.Errors) == 0 && e.Message == "" { return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) } var buf bytes.Buffer fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) if e.Message != "" { fmt.Fprintf(&buf, "%s", e.Message) } if len(e.Errors) == 0 { return strings.TrimSpace(buf.String()) } if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) return buf.String() } fmt.Fprintln(&buf, "\nMore details:") for _, v := range e.Errors { fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) } return buf.String() } type errorReply struct { Error *Error `json:"error"` } // CheckResponse returns an error (of type *Error) if the response // status code is not 2xx. func CheckResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } slurp, err := ioutil.ReadAll(res.Body) if err == nil { jerr := new(errorReply) err = json.Unmarshal(slurp, jerr) if err == nil && jerr.Error != nil { if jerr.Error.Code == 0 { jerr.Error.Code = res.StatusCode } jerr.Error.Body = string(slurp) return jerr.Error } } return &Error{ Code: res.StatusCode, Body: string(slurp), } } type MarshalStyle bool var WithDataWrapper = MarshalStyle(true) var WithoutDataWrapper = MarshalStyle(false) func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { buf := new(bytes.Buffer) if wrap { buf.Write([]byte(`{"data": `)) } err := json.NewEncoder(buf).Encode(v) if err != nil { return nil, err } if wrap { buf.Write([]byte(`}`)) } return buf, nil } func getMediaType(media io.Reader) (io.Reader, string) { if typer, ok := media.(ContentTyper); ok { return media, typer.ContentType() } pr, pw := io.Pipe() typ := "application/octet-stream" buf, err := ioutil.ReadAll(io.LimitReader(media, 512)) if err != nil { pw.CloseWithError(fmt.Errorf("error reading media: %v", err)) return pr, typ } typ = http.DetectContentType(buf) mr := io.MultiReader(bytes.NewReader(buf), media) go func() { _, err = io.Copy(pw, mr) if err != nil { pw.CloseWithError(fmt.Errorf("error reading media: %v", err)) return } pw.Close() }() return pr, typ } // DetectMediaType detects and returns the content type of the provided media. // If the type can not be determined, "application/octet-stream" is returned. func DetectMediaType(media io.ReaderAt) string { if typer, ok := media.(ContentTyper); ok { return typer.ContentType() } typ := "application/octet-stream" buf := make([]byte, 1024) n, err := media.ReadAt(buf, 0) buf = buf[:n] if err == nil || err == io.EOF { typ = http.DetectContentType(buf) } return typ } type Lengther interface { Len() int } // endingWithErrorReader from r until it returns an error. If the // final error from r is io.EOF and e is non-nil, e is used instead. type endingWithErrorReader struct { r io.Reader e error } func (er endingWithErrorReader) Read(p []byte) (n int, err error) { n, err = er.r.Read(p) if err == io.EOF && er.e != nil { err = er.e } return } func typeHeader(contentType string) textproto.MIMEHeader { h := make(textproto.MIMEHeader) h.Set("Content-Type", contentType) return h } // countingWriter counts the number of bytes it receives to write, but // discards them. type countingWriter struct { n *int64 } func (w countingWriter) Write(p []byte) (int, error) { *w.n += int64(len(p)) return len(p), nil } // ConditionallyIncludeMedia does nothing if media is nil. // // bodyp is an in/out parameter. 
It should initially point to the // reader of the application/json (or whatever) payload to send in the // API request. It's updated to point to the multipart body reader. // // ctypep is an in/out parameter. It should initially point to the // content type of the bodyp, usually "application/json". It's updated // to the "multipart/related" content type, with random boundary. // // The return value is the content-length of the entire multipart body. func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) { if media == nil { return } // Get the media type, which might return a different reader instance. var mediaType string media, mediaType = getMediaType(media) body, bodyType := *bodyp, *ctypep pr, pw := io.Pipe() mpw := multipart.NewWriter(pw) *bodyp = pr *ctypep = "multipart/related; boundary=" + mpw.Boundary() go func() { w, err := mpw.CreatePart(typeHeader(bodyType)) if err != nil { mpw.Close() pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err)) return } _, err = io.Copy(w, body) if err != nil { mpw.Close() pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err)) return } w, err = mpw.CreatePart(typeHeader(mediaType)) if err != nil { mpw.Close() pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err)) return } _, err = io.Copy(w, media) if err != nil { mpw.Close() pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err)) return } mpw.Close() pw.Close() }() cancel = func() { pw.CloseWithError(errAborted) } return cancel, true } var errAborted = errors.New("googleapi: upload aborted") // ProgressUpdater is a function that is called upon every progress update of a resumable upload. // This is the only part of a resumable upload (from googleapi) that is usable by the developer. // The remaining usable pieces of resumable uploads are exposed in each auto-generated API. type ProgressUpdater func(current, total int64) // ResumableUpload is used by the generated APIs to provide resumable uploads. // It is not used by developers directly. type ResumableUpload struct { Client *http.Client // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". URI string UserAgent string // User-Agent for header of the request // Media is the object being uploaded. Media io.ReaderAt // MediaType defines the media type, e.g. "image/jpeg". MediaType string // ContentLength is the full size of the object being uploaded. ContentLength int64 mu sync.Mutex // guards progress progress int64 // number of bytes uploaded so far // Callback is an optional function that will be called upon every progress update. Callback ProgressUpdater } var ( // rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded. rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`) // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. chunkSize int64 = 1 << 18 ) // Progress returns the number of bytes uploaded at this point.
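//
// A small sketch of how a caller might observe upload progress, either by
// polling Progress below or by setting the optional Callback field (the rx
// variable is assumed to come from the generated upload code):
//
//	rx.Callback = func(current, total int64) {
//		fmt.Printf("uploaded %d of %d bytes\n", current, total)
//	}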
func (rx *ResumableUpload) Progress() int64 { rx.mu.Lock() defer rx.mu.Unlock() return rx.progress } func (rx *ResumableUpload) transferStatus() (int64, *http.Response, error) { req, _ := http.NewRequest("POST", rx.URI, nil) req.ContentLength = 0 req.Header.Set("User-Agent", rx.UserAgent) req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength)) res, err := rx.Client.Do(req) if err != nil || res.StatusCode != statusResumeIncomplete { return 0, res, err } var start int64 if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 { start, err = strconv.ParseInt(m[1], 10, 64) if err != nil { return 0, nil, fmt.Errorf("unable to parse range size %v", m[1]) } start += 1 // Start at the next byte } return start, res, nil } type chunk struct { body io.Reader size int64 err error } func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) { start, res, err := rx.transferStatus() if err != nil || res.StatusCode != statusResumeIncomplete { return res, err } for { select { // Check for cancellation case <-ctx.Done(): res.StatusCode = http.StatusRequestTimeout return res, ctx.Err() default: } reqSize := rx.ContentLength - start if reqSize > chunkSize { reqSize = chunkSize } r := io.NewSectionReader(rx.Media, start, reqSize) req, _ := http.NewRequest("POST", rx.URI, r) req.ContentLength = reqSize req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength)) req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) res, err = rx.Client.Do(req) start += reqSize if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) { rx.mu.Lock() rx.progress = start // keep track of number of bytes sent so far rx.mu.Unlock() if rx.Callback != nil { rx.Callback(start, rx.ContentLength) } } if err != nil || res.StatusCode != statusResumeIncomplete { break } } return res, err } var sleep = time.Sleep // override in unit tests // Upload starts the process of a resumable upload with a cancellable context. // It retries indefinitely (with a pause of uploadPause between attempts) until cancelled. // It is called from the auto-generated API code and is not visible to the user. // rx is private to the auto-generated API code. func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) { var res *http.Response var err error for { res, err = rx.transferChunks(ctx) if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK { return res, err } select { // Check for cancellation case <-ctx.Done(): res.StatusCode = http.StatusRequestTimeout return res, ctx.Err() default: } sleep(uploadPause) } return res, err } func ResolveRelative(basestr, relstr string) string { u, _ := url.Parse(basestr) rel, _ := url.Parse(relstr) u = u.ResolveReference(rel) us := u.String() us = strings.Replace(us, "%7B", "{", -1) us = strings.Replace(us, "%7D", "}", -1) return us } // has4860Fix is whether this Go environment contains the fix for // http://golang.org/issue/4860 var has4860Fix bool // init initializes has4860Fix by checking the behavior of the net/http package. func init() { r := http.Request{ URL: &url.URL{ Scheme: "http", Opaque: "//opaque", }, } b := &bytes.Buffer{} r.Write(b) has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) } // SetOpaque sets u.Opaque from u.Path such that HTTP requests to it // don't alter any hex-escaped characters in u.Path. 
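//
// The generated Do() methods above depend on this pairing: ResolveRelative
// builds a URL that still contains "{param}" placeholders, Expand (below)
// fills them in, and SetOpaque keeps any percent-escapes intact on the wire.
// A rough sketch with assumed values:
//
//	urls := ResolveRelative("https://www.googleapis.com/container/v1beta1/projects/", "{projectId}/clusters")
//	u, _ := url.Parse(urls)
//	Expand(u, map[string]string{"projectId": "my-project"})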
func SetOpaque(u *url.URL) { u.Opaque = "//" + u.Host + u.Path if !has4860Fix { u.Opaque = u.Scheme + ":" + u.Opaque } } // Expand substitutes any {encoded} strings in the URL passed in using // the map supplied. // // This calls SetOpaque to avoid encoding of the parameters in the URL path. func Expand(u *url.URL, expansions map[string]string) { expanded, err := uritemplates.Expand(u.Path, expansions) if err == nil { u.Path = expanded SetOpaque(u) } } // CloseBody is used to close res.Body. // Prior to calling Close, it also tries to Read a small amount to see an EOF. // Not seeing an EOF can prevent HTTP Transports from reusing connections. func CloseBody(res *http.Response) { if res == nil || res.Body == nil { return } // Justification for 3 byte reads: two for up to "\r\n" after // a JSON/XML document, and then 1 to see EOF if we haven't yet. // TODO(bradfitz): detect Go 1.3+ and skip these reads. // See https://codereview.appspot.com/58240043 // and https://codereview.appspot.com/49570044 buf := make([]byte, 1) for i := 0; i < 3; i++ { _, err := res.Body.Read(buf) if err != nil { break } } res.Body.Close() } // VariantType returns the type name of the given variant. // If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. // This is used to support "variant" APIs that can return one of a number of different types. func VariantType(t map[string]interface{}) string { s, _ := t["type"].(string) return s } // ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. // This is used to support "variant" APIs that can return one of a number of different types. // It reports whether the conversion was successful. func ConvertVariant(v map[string]interface{}, dst interface{}) bool { var buf bytes.Buffer err := json.NewEncoder(&buf).Encode(v) if err != nil { return false } return json.Unmarshal(buf.Bytes(), dst) == nil } // A Field names a field to be retrieved with a partial response. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // // Partial responses can dramatically reduce the amount of data that must be sent to your application. // In order to request partial responses, you can specify the full list of fields // that your application needs by adding the Fields option to your request. // // Field strings use camelCase with leading lower-case characters to identify fields within the response. // // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // // svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // // svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // More information about field formatting can be found here: // https://developers.google.com/+/api/#fields-syntax // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ type Field string // CombineFields combines fields into a single string. 
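// A short runnable sketch of how partial-response selectors are combined; it
// uses the exported Field type and the CombineFields function defined next,
// with the field names taken from the doc comment above.
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	fields := []googleapi.Field{"nextPageToken", "items(id,updated)"}
	// Generated calls send this combined string as the "fields" query parameter.
	fmt.Println(googleapi.CombineFields(fields)) // nextPageToken,items(id,updated)
}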
func CombineFields(s []Field) string { r := make([]string, len(s)) for i, v := range s { r[i] = string(v) } return strings.Join(r, ",") } ================================================ FILE: vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE ================================================ Copyright (c) 2013 Joshua Tacoma Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go ================================================ // Copyright 2013 Joshua Tacoma. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package uritemplates is a level 4 implementation of RFC 6570 (URI // Template, http://tools.ietf.org/html/rfc6570). // // To use uritemplates, parse a template string and expand it with a value // map: // // template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") // values := make(map[string]interface{}) // values["user"] = "jtacoma" // values["repo"] = "uritemplates" // expanded, _ := template.ExpandString(values) // fmt.Printf(expanded) // package uritemplates import ( "bytes" "errors" "fmt" "reflect" "regexp" "strconv" "strings" ) var ( unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") hex = []byte("0123456789ABCDEF") ) func pctEncode(src []byte) []byte { dst := make([]byte, len(src)*3) for i, b := range src { buf := dst[i*3 : i*3+3] buf[0] = 0x25 buf[1] = hex[b/16] buf[2] = hex[b%16] } return dst } func escape(s string, allowReserved bool) (escaped string) { if allowReserved { escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) } else { escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) } return escaped } // A UriTemplate is a parsed representation of a URI template. type UriTemplate struct { raw string parts []templatePart } // Parse parses a URI template string into a UriTemplate object. 
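// An example-test-style sketch (it would live in a _test.go file) in the
// spirit of the package documentation above, but calling the Expand method
// this copy actually defines rather than the ExpandString mentioned there.
package uritemplates

import "fmt"

func ExampleUriTemplate_Expand() {
	template, _ := Parse("https://api.github.com/repos{/user,repo}")
	values := map[string]interface{}{
		"user": "jtacoma",
		"repo": "uritemplates",
	}
	expanded, _ := template.Expand(values)
	fmt.Println(expanded)
	// Output: https://api.github.com/repos/jtacoma/uritemplates
}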
func Parse(rawtemplate string) (template *UriTemplate, err error) { template = new(UriTemplate) template.raw = rawtemplate split := strings.Split(rawtemplate, "{") template.parts = make([]templatePart, len(split)*2-1) for i, s := range split { if i == 0 { if strings.Contains(s, "}") { err = errors.New("unexpected }") break } template.parts[i].raw = s } else { subsplit := strings.Split(s, "}") if len(subsplit) != 2 { err = errors.New("malformed template") break } expression := subsplit[0] template.parts[i*2-1], err = parseExpression(expression) if err != nil { break } template.parts[i*2].raw = subsplit[1] } } if err != nil { template = nil } return template, err } type templatePart struct { raw string terms []templateTerm first string sep string named bool ifemp string allowReserved bool } type templateTerm struct { name string explode bool truncate int } func parseExpression(expression string) (result templatePart, err error) { switch expression[0] { case '+': result.sep = "," result.allowReserved = true expression = expression[1:] case '.': result.first = "." result.sep = "." expression = expression[1:] case '/': result.first = "/" result.sep = "/" expression = expression[1:] case ';': result.first = ";" result.sep = ";" result.named = true expression = expression[1:] case '?': result.first = "?" result.sep = "&" result.named = true result.ifemp = "=" expression = expression[1:] case '&': result.first = "&" result.sep = "&" result.named = true result.ifemp = "=" expression = expression[1:] case '#': result.first = "#" result.sep = "," result.allowReserved = true expression = expression[1:] default: result.sep = "," } rawterms := strings.Split(expression, ",") result.terms = make([]templateTerm, len(rawterms)) for i, raw := range rawterms { result.terms[i], err = parseTerm(raw) if err != nil { break } } return result, err } func parseTerm(term string) (result templateTerm, err error) { if strings.HasSuffix(term, "*") { result.explode = true term = term[:len(term)-1] } split := strings.Split(term, ":") if len(split) == 1 { result.name = term } else if len(split) == 2 { result.name = split[0] var parsed int64 parsed, err = strconv.ParseInt(split[1], 10, 0) result.truncate = int(parsed) } else { err = errors.New("multiple colons in same term") } if !validname.MatchString(result.name) { err = errors.New("not a valid name: " + result.name) } if result.explode && result.truncate > 0 { err = errors.New("both explode and prefix modifers on same term") } return result, err } // Expand expands a URI template with a set of values to produce a string. 
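// A second example-test-style sketch showing two of the operator prefixes
// handled by parseExpression above: "+" passes reserved characters through
// unescaped and "?" builds a query string. The values are arbitrary.
package uritemplates

import "fmt"

func ExampleUriTemplate_Expand_operators() {
	reserved, _ := Parse("{+path}/here")
	s1, _ := reserved.Expand(map[string]interface{}{"path": "/foo/bar"})

	query, _ := Parse("/search{?q,lang}")
	s2, _ := query.Expand(map[string]interface{}{"q": "chien", "lang": "fr"})

	fmt.Println(s1)
	fmt.Println(s2)
	// Output:
	// /foo/bar/here
	// /search?q=chien&lang=fr
}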
func (self *UriTemplate) Expand(value interface{}) (string, error) { values, ismap := value.(map[string]interface{}) if !ismap { if m, ismap := struct2map(value); !ismap { return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") } else { return self.Expand(m) } } var buf bytes.Buffer for _, p := range self.parts { err := p.expand(&buf, values) if err != nil { return "", err } } return buf.String(), nil } func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { if len(self.raw) > 0 { buf.WriteString(self.raw) return nil } var zeroLen = buf.Len() buf.WriteString(self.first) var firstLen = buf.Len() for _, term := range self.terms { value, exists := values[term.name] if !exists { continue } if buf.Len() != firstLen { buf.WriteString(self.sep) } switch v := value.(type) { case string: self.expandString(buf, term, v) case []interface{}: self.expandArray(buf, term, v) case map[string]interface{}: if term.truncate > 0 { return errors.New("cannot truncate a map expansion") } self.expandMap(buf, term, v) default: if m, ismap := struct2map(value); ismap { if term.truncate > 0 { return errors.New("cannot truncate a map expansion") } self.expandMap(buf, term, m) } else { str := fmt.Sprintf("%v", value) self.expandString(buf, term, str) } } } if buf.Len() == firstLen { original := buf.Bytes()[:zeroLen] buf.Reset() buf.Write(original) } return nil } func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { if self.named { buf.WriteString(name) if empty { buf.WriteString(self.ifemp) } else { buf.WriteString("=") } } } func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { if len(s) > t.truncate && t.truncate > 0 { s = s[:t.truncate] } self.expandName(buf, t.name, len(s) == 0) buf.WriteString(escape(s, self.allowReserved)) } func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { if len(a) == 0 { return } else if !t.explode { self.expandName(buf, t.name, false) } for i, value := range a { if t.explode && i > 0 { buf.WriteString(self.sep) } else if i > 0 { buf.WriteString(",") } var s string switch v := value.(type) { case string: s = v default: s = fmt.Sprintf("%v", v) } if len(s) > t.truncate && t.truncate > 0 { s = s[:t.truncate] } if self.named && t.explode { self.expandName(buf, t.name, len(s) == 0) } buf.WriteString(escape(s, self.allowReserved)) } } func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { if len(m) == 0 { return } if !t.explode { self.expandName(buf, t.name, len(m) == 0) } var firstLen = buf.Len() for k, value := range m { if firstLen != buf.Len() { if t.explode { buf.WriteString(self.sep) } else { buf.WriteString(",") } } var s string switch v := value.(type) { case string: s = v default: s = fmt.Sprintf("%v", v) } if t.explode { buf.WriteString(escape(k, self.allowReserved)) buf.WriteRune('=') buf.WriteString(escape(s, self.allowReserved)) } else { buf.WriteString(escape(k, self.allowReserved)) buf.WriteRune(',') buf.WriteString(escape(s, self.allowReserved)) } } } func struct2map(v interface{}) (map[string]interface{}, bool) { value := reflect.ValueOf(v) switch value.Type().Kind() { case reflect.Ptr: return struct2map(value.Elem().Interface()) case reflect.Struct: m := make(map[string]interface{}) for i := 0; i < value.NumField(); i++ { tag := value.Type().Field(i).Tag var name string if strings.Contains(string(tag), ":") { name = tag.Get("uri") } else { name = 
strings.TrimSpace(string(tag)) } if len(name) == 0 { name = value.Type().Field(i).Name } m[name] = value.Field(i).Interface() } return m, true } return nil, false } ================================================ FILE: vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go ================================================ package uritemplates func Expand(path string, expansions map[string]string) (string, error) { template, err := Parse(path) if err != nil { return "", err } values := make(map[string]interface{}) for k, v := range expansions { values[k] = v } return template.Expand(values) } ================================================ FILE: vendor/google.golang.org/api/googleapi/transport/apikey.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package transport contains HTTP transports used to make // authenticated API requests. package transport import ( "errors" "net/http" ) // APIKey is an HTTP Transport which wraps an underlying transport and // appends an API Key "key" parameter to the URL of outgoing requests. type APIKey struct { // Key is the API Key to set on requests. Key string // Transport is the underlying HTTP transport. // If nil, http.DefaultTransport is used. Transport http.RoundTripper } func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.Transport if rt == nil { rt = http.DefaultTransport if rt == nil { return nil, errors.New("googleapi/transport: no Transport specified or available") } } newReq := *req args := newReq.URL.Query() args.Set("key", t.Key) newReq.URL.RawQuery = args.Encode() return rt.RoundTrip(&newReq) } ================================================ FILE: vendor/google.golang.org/api/googleapi/types.go ================================================ // Copyright 2013 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package googleapi import ( "encoding/json" "strconv" ) // Int64s is a slice of int64s that marshal as quoted strings in JSON. type Int64s []int64 func (q *Int64s) UnmarshalJSON(raw []byte) error { *q = (*q)[:0] var ss []string if err := json.Unmarshal(raw, &ss); err != nil { return err } for _, s := range ss { v, err := strconv.ParseInt(s, 10, 64) if err != nil { return err } *q = append(*q, int64(v)) } return nil } // Int32s is a slice of int32s that marshal as quoted strings in JSON. type Int32s []int32 func (q *Int32s) UnmarshalJSON(raw []byte) error { *q = (*q)[:0] var ss []string if err := json.Unmarshal(raw, &ss); err != nil { return err } for _, s := range ss { v, err := strconv.ParseInt(s, 10, 32) if err != nil { return err } *q = append(*q, int32(v)) } return nil } // Uint64s is a slice of uint64s that marshal as quoted strings in JSON. type Uint64s []uint64 func (q *Uint64s) UnmarshalJSON(raw []byte) error { *q = (*q)[:0] var ss []string if err := json.Unmarshal(raw, &ss); err != nil { return err } for _, s := range ss { v, err := strconv.ParseUint(s, 10, 64) if err != nil { return err } *q = append(*q, uint64(v)) } return nil } // Uint32s is a slice of uint32s that marshal as quoted strings in JSON. 
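// A short runnable sketch of the quoted-number encoding these slice types
// implement: int64 values marshal as JSON strings, so they survive decoders
// that only keep float64 precision. The sample values are arbitrary.
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	ids := googleapi.Int64s{1, 9007199254740993} // 2^53+1, not exactly representable as float64
	out, _ := json.Marshal(ids)
	fmt.Println(string(out)) // ["1","9007199254740993"]

	var back googleapi.Int64s
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back[1]) // 9007199254740993
}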
type Uint32s []uint32 func (q *Uint32s) UnmarshalJSON(raw []byte) error { *q = (*q)[:0] var ss []string if err := json.Unmarshal(raw, &ss); err != nil { return err } for _, s := range ss { v, err := strconv.ParseUint(s, 10, 32) if err != nil { return err } *q = append(*q, uint32(v)) } return nil } // Float64s is a slice of float64s that marshal as quoted strings in JSON. type Float64s []float64 func (q *Float64s) UnmarshalJSON(raw []byte) error { *q = (*q)[:0] var ss []string if err := json.Unmarshal(raw, &ss); err != nil { return err } for _, s := range ss { v, err := strconv.ParseFloat(s, 64) if err != nil { return err } *q = append(*q, float64(v)) } return nil } func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { dst := make([]byte, 0, 2+n*10) // somewhat arbitrary dst = append(dst, '[') for i := 0; i < n; i++ { if i > 0 { dst = append(dst, ',') } dst = append(dst, '"') dst = fn(dst, i) dst = append(dst, '"') } dst = append(dst, ']') return dst, nil } func (s Int64s) MarshalJSON() ([]byte, error) { return quotedList(len(s), func(dst []byte, i int) []byte { return strconv.AppendInt(dst, s[i], 10) }) } func (s Int32s) MarshalJSON() ([]byte, error) { return quotedList(len(s), func(dst []byte, i int) []byte { return strconv.AppendInt(dst, int64(s[i]), 10) }) } func (s Uint64s) MarshalJSON() ([]byte, error) { return quotedList(len(s), func(dst []byte, i int) []byte { return strconv.AppendUint(dst, s[i], 10) }) } func (s Uint32s) MarshalJSON() ([]byte, error) { return quotedList(len(s), func(dst []byte, i int) []byte { return strconv.AppendUint(dst, uint64(s[i]), 10) }) } func (s Float64s) MarshalJSON() ([]byte, error) { return quotedList(len(s), func(dst []byte, i int) []byte { return strconv.AppendFloat(dst, s[i], 'g', -1, 64) }) } ================================================ FILE: vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json ================================================ { "kind": "discovery#restDescription", "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/k747AQVNKzUoa08QT-Z1GxOMZC0\"", "discoveryVersion": "v1", "id": "pubsub:v1beta2", "name": "pubsub", "version": "v1beta2", "revision": "20150326", "title": "Google Cloud Pub/Sub API", "description": "Provides reliable, many-to-many, asynchronous messaging between applications.", "ownerDomain": "google.com", "ownerName": "Google", "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", "x32": "http://www.google.com/images/icons/product/search-32.gif" }, "documentationLink": "", "protocol": "rest", "baseUrl": "https://pubsub.googleapis.com/v1beta2/", "basePath": "/v1beta2/", "rootUrl": "https://pubsub.googleapis.com/", "servicePath": "v1beta2/", "batchPath": "batch", "parameters": { "alt": { "type": "string", "description": "Data format for the response.", "default": "json", "enum": [ "json" ], "enumDescriptions": [ "Responses with Content-Type of application/json" ], "location": "query" }, "fields": { "type": "string", "description": "Selector specifying which fields to include in a partial response.", "location": "query" }, "key": { "type": "string", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", "location": "query" }, "oauth_token": { "type": "string", "description": "OAuth 2.0 token for the current user.", "location": "query" }, "prettyPrint": { "type": "boolean", "description": "Returns response with indentations and line breaks.", "default": "true", "location": "query" }, "quotaUser": { "type": "string", "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", "location": "query" }, "userIp": { "type": "string", "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", "location": "query" } }, "auth": { "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { "description": "View and manage your data across Google Cloud Platform services" }, "https://www.googleapis.com/auth/pubsub": { "description": "View and manage Pub/Sub topics and subscriptions" } } } }, "schemas": { "AcknowledgeRequest": { "id": "AcknowledgeRequest", "type": "object", "properties": { "ackIds": { "type": "array", "items": { "type": "string" } } } }, "Empty": { "id": "Empty", "type": "object" }, "ListSubscriptionsResponse": { "id": "ListSubscriptionsResponse", "type": "object", "properties": { "nextPageToken": { "type": "string" }, "subscriptions": { "type": "array", "items": { "$ref": "Subscription" } } } }, "ListTopicSubscriptionsResponse": { "id": "ListTopicSubscriptionsResponse", "type": "object", "properties": { "nextPageToken": { "type": "string" }, "subscriptions": { "type": "array", "items": { "type": "string" } } } }, "ListTopicsResponse": { "id": "ListTopicsResponse", "type": "object", "properties": { "nextPageToken": { "type": "string" }, "topics": { "type": "array", "items": { "$ref": "Topic" } } } }, "ModifyAckDeadlineRequest": { "id": "ModifyAckDeadlineRequest", "type": "object", "properties": { "ackDeadlineSeconds": { "type": "integer", "format": "int32" }, "ackId": { "type": "string" } } }, "ModifyPushConfigRequest": { "id": "ModifyPushConfigRequest", "type": "object", "properties": { "pushConfig": { "$ref": "PushConfig" } } }, "PublishRequest": { "id": "PublishRequest", "type": "object", "properties": { "messages": { "type": "array", "items": { "$ref": "PubsubMessage" } } } }, "PublishResponse": { "id": "PublishResponse", "type": "object", "properties": { "messageIds": { "type": "array", "items": { "type": "string" } } } }, "PubsubMessage": { "id": "PubsubMessage", "type": "object", "properties": { "attributes": { "type": "object", "additionalProperties": { "type": "string" } }, "data": { "type": "string", "format": "byte" }, "messageId": { "type": "string" } } }, "PullRequest": { "id": "PullRequest", "type": "object", "properties": { "maxMessages": { "type": "integer", "format": "int32" }, "returnImmediately": { "type": "boolean" } } }, "PullResponse": { "id": "PullResponse", "type": "object", "properties": { "receivedMessages": { "type": "array", "items": { "$ref": "ReceivedMessage" } } } }, "PushConfig": { "id": "PushConfig", "type": "object", "properties": { "attributes": { "type": "object", "additionalProperties": { "type": "string" } }, "pushEndpoint": { "type": "string" } } }, "ReceivedMessage": { "id": "ReceivedMessage", "type": "object", "properties": { "ackId": { "type": "string" }, "message": { "$ref": "PubsubMessage" } } }, "Subscription": { "id": "Subscription", "type": "object", "properties": { 
"ackDeadlineSeconds": { "type": "integer", "format": "int32" }, "name": { "type": "string" }, "pushConfig": { "$ref": "PushConfig" }, "topic": { "type": "string" } } }, "Topic": { "id": "Topic", "type": "object", "properties": { "name": { "type": "string" } } } }, "resources": { "projects": { "resources": { "subscriptions": { "methods": { "acknowledge": { "id": "pubsub.projects.subscriptions.acknowledge", "path": "{+subscription}:acknowledge", "httpMethod": "POST", "description": "Acknowledges the messages associated with the ack tokens in the AcknowledgeRequest. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.", "parameters": { "subscription": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "subscription" ], "request": { "$ref": "AcknowledgeRequest" }, "response": { "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "create": { "id": "pubsub.projects.subscriptions.create", "path": "{+name}", "httpMethod": "PUT", "description": "Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.", "parameters": { "name": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "name" ], "request": { "$ref": "Subscription" }, "response": { "$ref": "Subscription" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "delete": { "id": "pubsub.projects.subscriptions.delete", "path": "{+subscription}", "httpMethod": "DELETE", "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to Pull after deletion will return NOT_FOUND. 
After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.", "parameters": { "subscription": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "subscription" ], "response": { "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "get": { "id": "pubsub.projects.subscriptions.get", "path": "{+subscription}", "httpMethod": "GET", "description": "Gets the configuration details of a subscription.", "parameters": { "subscription": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "subscription" ], "response": { "$ref": "Subscription" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "list": { "id": "pubsub.projects.subscriptions.list", "path": "{+project}/subscriptions", "httpMethod": "GET", "description": "Lists matching subscriptions.", "parameters": { "pageSize": { "type": "integer", "format": "int32", "location": "query" }, "pageToken": { "type": "string", "location": "query" }, "project": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "project" ], "response": { "$ref": "ListSubscriptionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "modifyAckDeadline": { "id": "pubsub.projects.subscriptions.modifyAckDeadline", "path": "{+subscription}:modifyAckDeadline", "httpMethod": "POST", "description": "Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.", "parameters": { "subscription": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "subscription" ], "request": { "$ref": "ModifyAckDeadlineRequest" }, "response": { "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "modifyPushConfig": { "id": "pubsub.projects.subscriptions.modifyPushConfig", "path": "{+subscription}:modifyPushConfig", "httpMethod": "POST", "description": "Modifies the PushConfig for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty PushConfig) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the PushConfig.", "parameters": { "subscription": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "subscription" ], "request": { "$ref": "ModifyPushConfigRequest" }, "response": { "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "pull": { "id": "pubsub.projects.subscriptions.pull", "path": "{+subscription}:pull", "httpMethod": "POST", "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. 
The server may return UNAVAILABLE if there are too many concurrent pull requests pending for the given subscription.", "parameters": { "subscription": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "subscription" ], "request": { "$ref": "PullRequest" }, "response": { "$ref": "PullResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] } } }, "topics": { "methods": { "create": { "id": "pubsub.projects.topics.create", "path": "{+name}", "httpMethod": "PUT", "description": "Creates the given topic with the given name.", "parameters": { "name": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "name" ], "request": { "$ref": "Topic" }, "response": { "$ref": "Topic" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "delete": { "id": "pubsub.projects.topics.delete", "path": "{+topic}", "httpMethod": "DELETE", "description": "Deletes the topic with the given name. Returns NOT_FOUND if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted.", "parameters": { "topic": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "topic" ], "response": { "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "get": { "id": "pubsub.projects.topics.get", "path": "{+topic}", "httpMethod": "GET", "description": "Gets the configuration of a topic.", "parameters": { "topic": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "topic" ], "response": { "$ref": "Topic" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "list": { "id": "pubsub.projects.topics.list", "path": "{+project}/topics", "httpMethod": "GET", "description": "Lists matching topics.", "parameters": { "pageSize": { "type": "integer", "format": "int32", "location": "query" }, "pageToken": { "type": "string", "location": "query" }, "project": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "project" ], "response": { "$ref": "ListTopicsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] }, "publish": { "id": "pubsub.projects.topics.publish", "path": "{+topic}:publish", "httpMethod": "POST", "description": "Adds one or more messages to the topic. 
Returns NOT_FOUND if the topic does not exist.", "parameters": { "topic": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "topic" ], "request": { "$ref": "PublishRequest" }, "response": { "$ref": "PublishResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] } }, "resources": { "subscriptions": { "methods": { "list": { "id": "pubsub.projects.topics.subscriptions.list", "path": "{+topic}/subscriptions", "httpMethod": "GET", "description": "Lists the name of the subscriptions for this topic.", "parameters": { "pageSize": { "type": "integer", "format": "int32", "location": "query" }, "pageToken": { "type": "string", "location": "query" }, "topic": { "type": "string", "required": true, "location": "path" } }, "parameterOrder": [ "topic" ], "response": { "$ref": "ListTopicSubscriptionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ] } } } } } } } } } ================================================ FILE: vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go ================================================ // Package pubsub provides access to the Google Cloud Pub/Sub API. // // Usage example: // // import "google.golang.org/api/pubsub/v1beta2" // ... // pubsubService, err := pubsub.New(oauthHttpClient) package pubsub import ( "bytes" "encoding/json" "errors" "fmt" "golang.org/x/net/context" "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Background const apiId = "pubsub:v1beta2" const apiName = "pubsub" const apiVersion = "v1beta2" const basePath = "https://pubsub.googleapis.com/v1beta2/" // OAuth2 scopes used by this API. 
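// A minimal sketch of constructing the generated client, following the usage
// example in the package comment above. A real client must carry OAuth2
// credentials authorized for one of the scopes declared just below; the
// default HTTP client here only illustrates the wiring.
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/pubsub/v1beta2"
)

func main() {
	svc, err := pubsub.New(http.DefaultClient)
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.BasePath) // https://pubsub.googleapis.com/v1beta2/
}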
const ( // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View and manage Pub/Sub topics and subscriptions PubsubScope = "https://www.googleapis.com/auth/pubsub" ) func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Projects = NewProjectsService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Projects *ProjectsService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Subscriptions = NewProjectsSubscriptionsService(s) rs.Topics = NewProjectsTopicsService(s) return rs } type ProjectsService struct { s *Service Subscriptions *ProjectsSubscriptionsService Topics *ProjectsTopicsService } func NewProjectsSubscriptionsService(s *Service) *ProjectsSubscriptionsService { rs := &ProjectsSubscriptionsService{s: s} return rs } type ProjectsSubscriptionsService struct { s *Service } func NewProjectsTopicsService(s *Service) *ProjectsTopicsService { rs := &ProjectsTopicsService{s: s} rs.Subscriptions = NewProjectsTopicsSubscriptionsService(s) return rs } type ProjectsTopicsService struct { s *Service Subscriptions *ProjectsTopicsSubscriptionsService } func NewProjectsTopicsSubscriptionsService(s *Service) *ProjectsTopicsSubscriptionsService { rs := &ProjectsTopicsSubscriptionsService{s: s} return rs } type ProjectsTopicsSubscriptionsService struct { s *Service } type AcknowledgeRequest struct { AckIds []string `json:"ackIds,omitempty"` } type Empty struct { } type ListSubscriptionsResponse struct { NextPageToken string `json:"nextPageToken,omitempty"` Subscriptions []*Subscription `json:"subscriptions,omitempty"` } type ListTopicSubscriptionsResponse struct { NextPageToken string `json:"nextPageToken,omitempty"` Subscriptions []string `json:"subscriptions,omitempty"` } type ListTopicsResponse struct { NextPageToken string `json:"nextPageToken,omitempty"` Topics []*Topic `json:"topics,omitempty"` } type ModifyAckDeadlineRequest struct { AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` AckId string `json:"ackId,omitempty"` } type ModifyPushConfigRequest struct { PushConfig *PushConfig `json:"pushConfig,omitempty"` } type PublishRequest struct { Messages []*PubsubMessage `json:"messages,omitempty"` } type PublishResponse struct { MessageIds []string `json:"messageIds,omitempty"` } type PubsubMessage struct { Attributes map[string]string `json:"attributes,omitempty"` Data string `json:"data,omitempty"` MessageId string `json:"messageId,omitempty"` } type PullRequest struct { MaxMessages int64 `json:"maxMessages,omitempty"` ReturnImmediately bool `json:"returnImmediately,omitempty"` } type PullResponse struct { ReceivedMessages []*ReceivedMessage `json:"receivedMessages,omitempty"` } type PushConfig struct { Attributes map[string]string `json:"attributes,omitempty"` PushEndpoint string `json:"pushEndpoint,omitempty"` } type ReceivedMessage struct { AckId string `json:"ackId,omitempty"` Message *PubsubMessage `json:"message,omitempty"` } type Subscription struct { AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` Name string `json:"name,omitempty"` PushConfig *PushConfig 
`json:"pushConfig,omitempty"` Topic string `json:"topic,omitempty"` } type Topic struct { Name string `json:"name,omitempty"` } // method id "pubsub.projects.subscriptions.acknowledge": type ProjectsSubscriptionsAcknowledgeCall struct { s *Service subscription string acknowledgerequest *AcknowledgeRequest opt_ map[string]interface{} } // Acknowledge: Acknowledges the messages associated with the ack tokens // in the AcknowledgeRequest. The Pub/Sub system can remove the relevant // messages from the subscription. Acknowledging a message whose ack // deadline has expired may succeed, but such a message may be // redelivered later. Acknowledging a message more than once will not // result in an error. func (r *ProjectsSubscriptionsService) Acknowledge(subscription string, acknowledgerequest *AcknowledgeRequest) *ProjectsSubscriptionsAcknowledgeCall { c := &ProjectsSubscriptionsAcknowledgeCall{s: r.s, opt_: make(map[string]interface{})} c.subscription = subscription c.acknowledgerequest = acknowledgerequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSubscriptionsAcknowledgeCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsAcknowledgeCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsAcknowledgeCall) Do() (*Empty, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:acknowledge") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "subscription": c.subscription, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Empty if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Acknowledges the messages associated with the ack tokens in the AcknowledgeRequest. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.acknowledge", // "parameterOrder": [ // "subscription" // ], // "parameters": { // "subscription": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+subscription}:acknowledge", // "request": { // "$ref": "AcknowledgeRequest" // }, // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.subscriptions.create": type ProjectsSubscriptionsCreateCall struct { s *Service name string subscription *Subscription opt_ map[string]interface{} } // Create: Creates a subscription to a given topic for a given // subscriber. If the subscription already exists, returns // ALREADY_EXISTS. 
If the corresponding topic doesn't exist, returns // NOT_FOUND. If the name is not provided in the request, the server // will assign a random name for this subscription on the same project // as the topic. func (r *ProjectsSubscriptionsService) Create(name string, subscription *Subscription) *ProjectsSubscriptionsCreateCall { c := &ProjectsSubscriptionsCreateCall{s: r.s, opt_: make(map[string]interface{})} c.name = name c.subscription = subscription return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSubscriptionsCreateCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsCreateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsCreateCall) Do() (*Subscription, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+name}") urls += "?" + params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Subscription if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.", // "httpMethod": "PUT", // "id": "pubsub.projects.subscriptions.create", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+name}", // "request": { // "$ref": "Subscription" // }, // "response": { // "$ref": "Subscription" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.subscriptions.delete": type ProjectsSubscriptionsDeleteCall struct { s *Service subscription string opt_ map[string]interface{} } // Delete: Deletes an existing subscription. All pending messages in the // subscription are immediately dropped. Calls to Pull after deletion // will return NOT_FOUND. After a subscription is deleted, a new one may // be created with the same name, but the new one has no association // with the old subscription, or its topic unless the same topic is // specified. func (r *ProjectsSubscriptionsService) Delete(subscription string) *ProjectsSubscriptionsDeleteCall { c := &ProjectsSubscriptionsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.subscription = subscription return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
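// A sketch of driving the generated Create call above. The project, topic and
// subscription names are placeholders; svc is assumed to be a *Service built
// with an OAuth2-authorized client.
package main

import "google.golang.org/api/pubsub/v1beta2"

func createSubscription(svc *pubsub.Service) (*pubsub.Subscription, error) {
	return svc.Projects.Subscriptions.Create(
		"projects/my-project/subscriptions/my-sub",
		&pubsub.Subscription{
			Topic:              "projects/my-project/topics/my-topic",
			AckDeadlineSeconds: 30,
		},
	).Do()
}

func main() {}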
func (c *ProjectsSubscriptionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsDeleteCall) Do() (*Empty, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}") urls += "?" + params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "subscription": c.subscription, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Empty if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to Pull after deletion will return NOT_FOUND. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.", // "httpMethod": "DELETE", // "id": "pubsub.projects.subscriptions.delete", // "parameterOrder": [ // "subscription" // ], // "parameters": { // "subscription": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+subscription}", // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.subscriptions.get": type ProjectsSubscriptionsGetCall struct { s *Service subscription string opt_ map[string]interface{} } // Get: Gets the configuration details of a subscription. func (r *ProjectsSubscriptionsService) Get(subscription string) *ProjectsSubscriptionsGetCall { c := &ProjectsSubscriptionsGetCall{s: r.s, opt_: make(map[string]interface{})} c.subscription = subscription return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSubscriptionsGetCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsGetCall) Do() (*Subscription, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "subscription": c.subscription, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Subscription if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Gets the configuration details of a subscription.", // "httpMethod": "GET", // "id": "pubsub.projects.subscriptions.get", // "parameterOrder": [ // "subscription" // ], // "parameters": { // "subscription": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+subscription}", // "response": { // "$ref": "Subscription" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.subscriptions.list": type ProjectsSubscriptionsListCall struct { s *Service project string opt_ map[string]interface{} } // List: Lists matching subscriptions. func (r *ProjectsSubscriptionsService) List(project string) *ProjectsSubscriptionsListCall { c := &ProjectsSubscriptionsListCall{s: r.s, opt_: make(map[string]interface{})} c.project = project return c } // PageSize sets the optional parameter "pageSize": func (c *ProjectsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsSubscriptionsListCall { c.opt_["pageSize"] = pageSize return c } // PageToken sets the optional parameter "pageToken": func (c *ProjectsSubscriptionsListCall) PageToken(pageToken string) *ProjectsSubscriptionsListCall { c.opt_["pageToken"] = pageToken return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsListCall) Do() (*ListSubscriptionsResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["pageSize"]; ok { params.Set("pageSize", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+project}/subscriptions") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ListSubscriptionsResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists matching subscriptions.", // "httpMethod": "GET", // "id": "pubsub.projects.subscriptions.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "pageSize": { // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "location": "query", // "type": "string" // }, // "project": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+project}/subscriptions", // "response": { // "$ref": "ListSubscriptionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.subscriptions.modifyAckDeadline": type ProjectsSubscriptionsModifyAckDeadlineCall struct { s *Service subscription string modifyackdeadlinerequest *ModifyAckDeadlineRequest opt_ map[string]interface{} } // ModifyAckDeadline: Modifies the ack deadline for a specific message. // This method is useful to indicate that more time is needed to process // a message by the subscriber, or to make the message available for // redelivery if the processing was interrupted. func (r *ProjectsSubscriptionsService) ModifyAckDeadline(subscription string, modifyackdeadlinerequest *ModifyAckDeadlineRequest) *ProjectsSubscriptionsModifyAckDeadlineCall { c := &ProjectsSubscriptionsModifyAckDeadlineCall{s: r.s, opt_: make(map[string]interface{})} c.subscription = subscription c.modifyackdeadlinerequest = modifyackdeadlinerequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyAckDeadlineCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do() (*Empty, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:modifyAckDeadline") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "subscription": c.subscription, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Empty if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Modifies the ack deadline for a specific message. 
This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.modifyAckDeadline", // "parameterOrder": [ // "subscription" // ], // "parameters": { // "subscription": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+subscription}:modifyAckDeadline", // "request": { // "$ref": "ModifyAckDeadlineRequest" // }, // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.subscriptions.modifyPushConfig": type ProjectsSubscriptionsModifyPushConfigCall struct { s *Service subscription string modifypushconfigrequest *ModifyPushConfigRequest opt_ map[string]interface{} } // ModifyPushConfig: Modifies the PushConfig for a specified // subscription. This may be used to change a push subscription to a // pull one (signified by an empty PushConfig) or vice versa, or change // the endpoint URL and other attributes of a push subscription. // Messages will accumulate for delivery continuously through the call // regardless of changes to the PushConfig. func (r *ProjectsSubscriptionsService) ModifyPushConfig(subscription string, modifypushconfigrequest *ModifyPushConfigRequest) *ProjectsSubscriptionsModifyPushConfigCall { c := &ProjectsSubscriptionsModifyPushConfigCall{s: r.s, opt_: make(map[string]interface{})} c.subscription = subscription c.modifypushconfigrequest = modifypushconfigrequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSubscriptionsModifyPushConfigCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyPushConfigCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsModifyPushConfigCall) Do() (*Empty, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:modifyPushConfig") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "subscription": c.subscription, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Empty if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Modifies the PushConfig for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty PushConfig) or vice versa, or change the endpoint URL and other attributes of a push subscription. 
Messages will accumulate for delivery continuously through the call regardless of changes to the PushConfig.", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.modifyPushConfig", // "parameterOrder": [ // "subscription" // ], // "parameters": { // "subscription": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+subscription}:modifyPushConfig", // "request": { // "$ref": "ModifyPushConfigRequest" // }, // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.subscriptions.pull": type ProjectsSubscriptionsPullCall struct { s *Service subscription string pullrequest *PullRequest opt_ map[string]interface{} } // Pull: Pulls messages from the server. Returns an empty list if there // are no messages available in the backlog. The server may return // UNAVAILABLE if there are too many concurrent pull requests pending // for the given subscription. func (r *ProjectsSubscriptionsService) Pull(subscription string, pullrequest *PullRequest) *ProjectsSubscriptionsPullCall { c := &ProjectsSubscriptionsPullCall{s: r.s, opt_: make(map[string]interface{})} c.subscription = subscription c.pullrequest = pullrequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSubscriptionsPullCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsPullCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsSubscriptionsPullCall) Do() (*PullResponse, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:pull") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "subscription": c.subscription, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *PullResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return UNAVAILABLE if there are too many concurrent pull requests pending for the given subscription.", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.pull", // "parameterOrder": [ // "subscription" // ], // "parameters": { // "subscription": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+subscription}:pull", // "request": { // "$ref": "PullRequest" // }, // "response": { // "$ref": "PullResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.topics.create": type ProjectsTopicsCreateCall struct { s *Service name string topic *Topic opt_ map[string]interface{} } // Create: Creates the given topic with the given name. 
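// A sketch of the receive side using the Pull and Acknowledge calls generated
// above. The subscription name is a placeholder and svc is assumed to be an
// authorized *Service.
package main

import "google.golang.org/api/pubsub/v1beta2"

func pullAndAck(svc *pubsub.Service) error {
	const sub = "projects/my-project/subscriptions/my-sub"
	resp, err := svc.Projects.Subscriptions.Pull(sub, &pubsub.PullRequest{
		MaxMessages:       10,
		ReturnImmediately: true,
	}).Do()
	if err != nil {
		return err
	}
	var ackIDs []string
	for _, m := range resp.ReceivedMessages {
		// m.Message.Data is base64-encoded; decode it before use.
		ackIDs = append(ackIDs, m.AckId)
	}
	if len(ackIDs) == 0 {
		return nil
	}
	_, err = svc.Projects.Subscriptions.Acknowledge(sub, &pubsub.AcknowledgeRequest{
		AckIds: ackIDs,
	}).Do()
	return err
}

func main() {}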
func (r *ProjectsTopicsService) Create(name string, topic *Topic) *ProjectsTopicsCreateCall { c := &ProjectsTopicsCreateCall{s: r.s, opt_: make(map[string]interface{})} c.name = name c.topic = topic return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsTopicsCreateCall) Fields(s ...googleapi.Field) *ProjectsTopicsCreateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsTopicsCreateCall) Do() (*Topic, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+name}") urls += "?" + params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Topic if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates the given topic with the given name.", // "httpMethod": "PUT", // "id": "pubsub.projects.topics.create", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+name}", // "request": { // "$ref": "Topic" // }, // "response": { // "$ref": "Topic" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.topics.delete": type ProjectsTopicsDeleteCall struct { s *Service topic string opt_ map[string]interface{} } // Delete: Deletes the topic with the given name. Returns NOT_FOUND if // the topic does not exist. After a topic is deleted, a new topic may // be created with the same name; this is an entirely new topic with // none of the old configuration or subscriptions. Existing // subscriptions to this topic are not deleted. func (r *ProjectsTopicsService) Delete(topic string) *ProjectsTopicsDeleteCall { c := &ProjectsTopicsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.topic = topic return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsTopicsDeleteCall) Fields(s ...googleapi.Field) *ProjectsTopicsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsTopicsDeleteCall) Do() (*Empty, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "topic": c.topic, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Empty if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Deletes the topic with the given name. Returns NOT_FOUND if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted.", // "httpMethod": "DELETE", // "id": "pubsub.projects.topics.delete", // "parameterOrder": [ // "topic" // ], // "parameters": { // "topic": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+topic}", // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.topics.get": type ProjectsTopicsGetCall struct { s *Service topic string opt_ map[string]interface{} } // Get: Gets the configuration of a topic. func (r *ProjectsTopicsService) Get(topic string) *ProjectsTopicsGetCall { c := &ProjectsTopicsGetCall{s: r.s, opt_: make(map[string]interface{})} c.topic = topic return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsTopicsGetCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsTopicsGetCall) Do() (*Topic, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "topic": c.topic, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Topic if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Gets the configuration of a topic.", // "httpMethod": "GET", // "id": "pubsub.projects.topics.get", // "parameterOrder": [ // "topic" // ], // "parameters": { // "topic": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+topic}", // "response": { // "$ref": "Topic" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.topics.list": type ProjectsTopicsListCall struct { s *Service project string opt_ map[string]interface{} } // List: Lists matching topics. 
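// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the generated client. It
// pages through List results with the PageSize/PageToken options exposed by
// the call below. The ListTopicsResponse field names (Topics, NextPageToken)
// are assumed from this API's schema defined earlier in the file; the project
// argument is the fully qualified "projects/{project-id}" form expected by
// the "{+project}/topics" path.
func exampleListAllTopics(svc *Service, project string) ([]*Topic, error) {
	var all []*Topic
	call := svc.Projects.Topics.List(project).PageSize(100)
	for {
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Topics...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		// Reusing the builder is safe here: PageToken simply overwrites the
		// previously stored option before the next Do.
		call.PageToken(resp.NextPageToken)
	}
}
// ---------------------------------------------------------------------------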
func (r *ProjectsTopicsService) List(project string) *ProjectsTopicsListCall { c := &ProjectsTopicsListCall{s: r.s, opt_: make(map[string]interface{})} c.project = project return c } // PageSize sets the optional parameter "pageSize": func (c *ProjectsTopicsListCall) PageSize(pageSize int64) *ProjectsTopicsListCall { c.opt_["pageSize"] = pageSize return c } // PageToken sets the optional parameter "pageToken": func (c *ProjectsTopicsListCall) PageToken(pageToken string) *ProjectsTopicsListCall { c.opt_["pageToken"] = pageToken return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsTopicsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsTopicsListCall) Do() (*ListTopicsResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["pageSize"]; ok { params.Set("pageSize", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+project}/topics") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ListTopicsResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists matching topics.", // "httpMethod": "GET", // "id": "pubsub.projects.topics.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "pageSize": { // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "location": "query", // "type": "string" // }, // "project": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+project}/topics", // "response": { // "$ref": "ListTopicsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.topics.publish": type ProjectsTopicsPublishCall struct { s *Service topic string publishrequest *PublishRequest opt_ map[string]interface{} } // Publish: Adds one or more messages to the topic. Returns NOT_FOUND if // the topic does not exist. func (r *ProjectsTopicsService) Publish(topic string, publishrequest *PublishRequest) *ProjectsTopicsPublishCall { c := &ProjectsTopicsPublishCall{s: r.s, opt_: make(map[string]interface{})} c.topic = topic c.publishrequest = publishrequest return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
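// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the generated client. It
// combines the Publish call with a Fields selector to request a partial
// response, as described in the Fields comment above. The field names used
// here (PublishRequest.Messages, PubsubMessage.Data, PublishResponse.MessageIds)
// and the "messageIds" selector are assumptions based on this API's schema
// defined earlier in the file; data is expected to be a base64-encoded payload.
func examplePublish(svc *Service, topic, data string) ([]string, error) {
	req := &PublishRequest{
		Messages: []*PubsubMessage{{Data: data}},
	}
	resp, err := svc.Projects.Topics.Publish(topic, req).Fields("messageIds").Do()
	if err != nil {
		return nil, err
	}
	return resp.MessageIds, nil
}
// ---------------------------------------------------------------------------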
func (c *ProjectsTopicsPublishCall) Fields(s ...googleapi.Field) *ProjectsTopicsPublishCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsTopicsPublishCall) Do() (*PublishResponse, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}:publish") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "topic": c.topic, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *PublishResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Adds one or more messages to the topic. Returns NOT_FOUND if the topic does not exist.", // "httpMethod": "POST", // "id": "pubsub.projects.topics.publish", // "parameterOrder": [ // "topic" // ], // "parameters": { // "topic": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+topic}:publish", // "request": { // "$ref": "PublishRequest" // }, // "response": { // "$ref": "PublishResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } // method id "pubsub.projects.topics.subscriptions.list": type ProjectsTopicsSubscriptionsListCall struct { s *Service topic string opt_ map[string]interface{} } // List: Lists the name of the subscriptions for this topic. func (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall { c := &ProjectsTopicsSubscriptionsListCall{s: r.s, opt_: make(map[string]interface{})} c.topic = topic return c } // PageSize sets the optional parameter "pageSize": func (c *ProjectsTopicsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsTopicsSubscriptionsListCall { c.opt_["pageSize"] = pageSize return c } // PageToken sets the optional parameter "pageToken": func (c *ProjectsTopicsSubscriptionsListCall) PageToken(pageToken string) *ProjectsTopicsSubscriptionsListCall { c.opt_["pageToken"] = pageToken return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsTopicsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsSubscriptionsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ProjectsTopicsSubscriptionsListCall) Do() (*ListTopicSubscriptionsResponse, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["pageSize"]; ok { params.Set("pageSize", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}/subscriptions") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "topic": c.topic, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ListTopicSubscriptionsResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Lists the name of the subscriptions for this topic.", // "httpMethod": "GET", // "id": "pubsub.projects.topics.subscriptions.list", // "parameterOrder": [ // "topic" // ], // "parameters": { // "pageSize": { // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "location": "query", // "type": "string" // }, // "topic": { // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "{+topic}/subscriptions", // "response": { // "$ref": "ListTopicSubscriptionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/pubsub" // ] // } } ================================================ FILE: vendor/google.golang.org/api/storage/v1/storage-api.json ================================================ { "kind": "discovery#restDescription", "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/lTxRjj5-AURGfd9glUYk42wgbOA\"", "discoveryVersion": "v1", "id": "storage:v1", "name": "storage", "version": "v1", "revision": "20150326", "title": "Cloud Storage API", "description": "Lets you store and retrieve potentially-large, immutable data objects.", "ownerDomain": "google.com", "ownerName": "Google", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" }, "documentationLink": "https://developers.google.com/storage/docs/json_api/", "labels": [ "labs" ], "protocol": "rest", "baseUrl": "https://www.googleapis.com/storage/v1/", "basePath": "/storage/v1/", "rootUrl": "https://www.googleapis.com/", "servicePath": "storage/v1/", "batchPath": "batch", "parameters": { "alt": { "type": "string", "description": "Data format for the response.", "default": "json", "enum": [ "json" ], "enumDescriptions": [ "Responses with Content-Type of application/json" ], "location": "query" }, "fields": { "type": "string", "description": "Selector specifying which fields to include in a partial response.", "location": "query" }, "key": { "type": "string", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "location": "query" }, "oauth_token": { "type": "string", "description": "OAuth 2.0 token for the current user.", "location": "query" }, "prettyPrint": { "type": "boolean", "description": "Returns response with indentations and line breaks.", "default": "true", "location": "query" }, "quotaUser": { "type": "string", "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", "location": "query" }, "userIp": { "type": "string", "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", "location": "query" } }, "auth": { "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { "description": "View and manage your data across Google Cloud Platform services" }, "https://www.googleapis.com/auth/devstorage.full_control": { "description": "Manage your data and permissions in Google Cloud Storage" }, "https://www.googleapis.com/auth/devstorage.read_only": { "description": "View your data in Google Cloud Storage" }, "https://www.googleapis.com/auth/devstorage.read_write": { "description": "Manage your data in Google Cloud Storage" } } } }, "schemas": { "Bucket": { "id": "Bucket", "type": "object", "description": "A bucket.", "properties": { "acl": { "type": "array", "description": "Access controls on the bucket.", "items": { "$ref": "BucketAccessControl" }, "annotations": { "required": [ "storage.buckets.update" ] } }, "cors": { "type": "array", "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", "items": { "type": "object", "properties": { "maxAgeSeconds": { "type": "integer", "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", "format": "int32" }, "method": { "type": "array", "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", "items": { "type": "string" } }, "origin": { "type": "array", "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", "items": { "type": "string" } }, "responseHeader": { "type": "array", "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", "items": { "type": "string" } } } } }, "defaultObjectAcl": { "type": "array", "description": "Default access controls to apply to new objects when no ACL is provided.", "items": { "$ref": "ObjectAccessControl" } }, "etag": { "type": "string", "description": "HTTP 1.1 Entity tag for the bucket." }, "id": { "type": "string", "description": "The ID of the bucket." }, "kind": { "type": "string", "description": "The kind of item this is. For buckets, this is always storage#bucket.", "default": "storage#bucket" }, "lifecycle": { "type": "object", "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", "properties": { "rule": { "type": "array", "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", "items": { "type": "object", "properties": { "action": { "type": "object", "description": "The action to take.", "properties": { "type": { "type": "string", "description": "Type of the action. Currently, only Delete is supported." } } }, "condition": { "type": "object", "description": "The condition(s) under which the action will be taken.", "properties": { "age": { "type": "integer", "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", "format": "int32" }, "createdBefore": { "type": "string", "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). 
This condition is satisfied when an object is created before midnight of the specified date in UTC.", "format": "date" }, "isLive": { "type": "boolean", "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects." }, "numNewerVersions": { "type": "integer", "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", "format": "int32" } } } } } } } }, "location": { "type": "string", "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list." }, "logging": { "type": "object", "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", "properties": { "logBucket": { "type": "string", "description": "The destination bucket where the current bucket's logs should be placed." }, "logObjectPrefix": { "type": "string", "description": "A prefix for log object names." } } }, "metageneration": { "type": "string", "description": "The metadata generation of this bucket.", "format": "int64" }, "name": { "type": "string", "description": "The name of the bucket.", "annotations": { "required": [ "storage.buckets.insert" ] } }, "owner": { "type": "object", "description": "The owner of the bucket. This is always the project team's owner group.", "properties": { "entity": { "type": "string", "description": "The entity, in the form project-owner-projectId." }, "entityId": { "type": "string", "description": "The ID for the entity." } } }, "projectNumber": { "type": "string", "description": "The project number of the project the bucket belongs to.", "format": "uint64" }, "selfLink": { "type": "string", "description": "The URI of this bucket." }, "storageClass": { "type": "string", "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes." }, "timeCreated": { "type": "string", "description": "Creation time of the bucket in RFC 3339 format.", "format": "date-time" }, "versioning": { "type": "object", "description": "The bucket's versioning configuration.", "properties": { "enabled": { "type": "boolean", "description": "While set to true, versioning is fully enabled for this bucket." } } }, "website": { "type": "object", "description": "The bucket's website configuration.", "properties": { "mainPageSuffix": { "type": "string", "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories." }, "notFoundPage": { "type": "string", "description": "The custom object to return when a requested resource is not found." } } } } }, "BucketAccessControl": { "id": "BucketAccessControl", "type": "object", "description": "An access-control entry.", "properties": { "bucket": { "type": "string", "description": "The name of the bucket." }, "domain": { "type": "string", "description": "The domain associated with the entity, if any." }, "email": { "type": "string", "description": "The email address associated with the entity, if any." 
}, "entity": { "type": "string", "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", "annotations": { "required": [ "storage.bucketAccessControls.insert" ] } }, "entityId": { "type": "string", "description": "The ID for the entity, if any." }, "etag": { "type": "string", "description": "HTTP 1.1 Entity tag for the access-control entry." }, "id": { "type": "string", "description": "The ID of the access-control entry." }, "kind": { "type": "string", "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", "default": "storage#bucketAccessControl" }, "projectTeam": { "type": "object", "description": "The project team associated with the entity, if any.", "properties": { "projectNumber": { "type": "string", "description": "The project number." }, "team": { "type": "string", "description": "The team. Can be owners, editors, or viewers." } } }, "role": { "type": "string", "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.", "annotations": { "required": [ "storage.bucketAccessControls.insert" ] } }, "selfLink": { "type": "string", "description": "The link to this access-control entry." } } }, "BucketAccessControls": { "id": "BucketAccessControls", "type": "object", "description": "An access-control list.", "properties": { "items": { "type": "array", "description": "The list of items.", "items": { "$ref": "BucketAccessControl" } }, "kind": { "type": "string", "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", "default": "storage#bucketAccessControls" } } }, "Buckets": { "id": "Buckets", "type": "object", "description": "A list of buckets.", "properties": { "items": { "type": "array", "description": "The list of items.", "items": { "$ref": "Bucket" } }, "kind": { "type": "string", "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", "default": "storage#buckets" }, "nextPageToken": { "type": "string", "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." } } }, "Channel": { "id": "Channel", "type": "object", "description": "An notification channel used to watch for resource changes.", "properties": { "address": { "type": "string", "description": "The address where notifications are delivered for this channel." }, "expiration": { "type": "string", "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", "format": "int64" }, "id": { "type": "string", "description": "A UUID or similar unique string that identifies this channel." }, "kind": { "type": "string", "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", "default": "api#channel" }, "params": { "type": "object", "description": "Additional parameters controlling delivery channel behavior. 
Optional.", "additionalProperties": { "type": "string", "description": "Declares a new parameter by name." } }, "payload": { "type": "boolean", "description": "A Boolean value to indicate whether payload is wanted. Optional." }, "resourceId": { "type": "string", "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." }, "resourceUri": { "type": "string", "description": "A version-specific identifier for the watched resource." }, "token": { "type": "string", "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." }, "type": { "type": "string", "description": "The type of delivery mechanism used for this channel." } } }, "ComposeRequest": { "id": "ComposeRequest", "type": "object", "description": "A Compose request.", "properties": { "destination": { "$ref": "Object", "description": "Properties of the resulting object." }, "kind": { "type": "string", "description": "The kind of item this is.", "default": "storage#composeRequest" }, "sourceObjects": { "type": "array", "description": "The list of source objects that will be concatenated into a single object.", "items": { "type": "object", "properties": { "generation": { "type": "string", "description": "The generation of this object to use as the source.", "format": "int64" }, "name": { "type": "string", "description": "The source object's name. The source object's bucket is implicitly the destination bucket.", "annotations": { "required": [ "storage.objects.compose" ] } }, "objectPreconditions": { "type": "object", "description": "Conditions that must be met for this operation to execute.", "properties": { "ifGenerationMatch": { "type": "string", "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.", "format": "int64" } } } } }, "annotations": { "required": [ "storage.objects.compose" ] } } } }, "Object": { "id": "Object", "type": "object", "description": "An object.", "properties": { "acl": { "type": "array", "description": "Access controls on the object.", "items": { "$ref": "ObjectAccessControl" }, "annotations": { "required": [ "storage.objects.update" ] } }, "bucket": { "type": "string", "description": "The name of the bucket containing this object." }, "cacheControl": { "type": "string", "description": "Cache-Control directive for the object data." }, "componentCount": { "type": "integer", "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", "format": "int32" }, "contentDisposition": { "type": "string", "description": "Content-Disposition of the object data." }, "contentEncoding": { "type": "string", "description": "Content-Encoding of the object data." }, "contentLanguage": { "type": "string", "description": "Content-Language of the object data." }, "contentType": { "type": "string", "description": "Content-Type of the object data.", "annotations": { "required": [ "storage.objects.update" ] } }, "crc32c": { "type": "string", "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order." }, "etag": { "type": "string", "description": "HTTP 1.1 Entity tag for the object." }, "generation": { "type": "string", "description": "The content generation of this object. 
Used for object versioning.", "format": "int64" }, "id": { "type": "string", "description": "The ID of the object." }, "kind": { "type": "string", "description": "The kind of item this is. For objects, this is always storage#object.", "default": "storage#object" }, "md5Hash": { "type": "string", "description": "MD5 hash of the data; encoded using base64." }, "mediaLink": { "type": "string", "description": "Media download link." }, "metadata": { "type": "object", "description": "User-provided metadata, in key/value pairs.", "additionalProperties": { "type": "string", "description": "An individual metadata entry." } }, "metageneration": { "type": "string", "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", "format": "int64" }, "name": { "type": "string", "description": "The name of this object. Required if not specified by URL parameter." }, "owner": { "type": "object", "description": "The owner of the object. This will always be the uploader of the object.", "properties": { "entity": { "type": "string", "description": "The entity, in the form user-userId." }, "entityId": { "type": "string", "description": "The ID for the entity." } } }, "selfLink": { "type": "string", "description": "The link to this object." }, "size": { "type": "string", "description": "Content-Length of the data in bytes.", "format": "uint64" }, "storageClass": { "type": "string", "description": "Storage class of the object." }, "timeDeleted": { "type": "string", "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", "format": "date-time" }, "updated": { "type": "string", "description": "The creation or modification time of the object in RFC 3339 format. For buckets with versioning enabled, changing an object's metadata does not change this property.", "format": "date-time" } } }, "ObjectAccessControl": { "id": "ObjectAccessControl", "type": "object", "description": "An access-control entry.", "properties": { "bucket": { "type": "string", "description": "The name of the bucket." }, "domain": { "type": "string", "description": "The domain associated with the entity, if any." }, "email": { "type": "string", "description": "The email address associated with the entity, if any." }, "entity": { "type": "string", "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com." }, "entityId": { "type": "string", "description": "The ID for the entity, if any." }, "etag": { "type": "string", "description": "HTTP 1.1 Entity tag for the access-control entry." }, "generation": { "type": "string", "description": "The content generation of the object.", "format": "int64" }, "id": { "type": "string", "description": "The ID of the access-control entry." }, "kind": { "type": "string", "description": "The kind of item this is. 
For object access control entries, this is always storage#objectAccessControl.", "default": "storage#objectAccessControl" }, "object": { "type": "string", "description": "The name of the object." }, "projectTeam": { "type": "object", "description": "The project team associated with the entity, if any.", "properties": { "projectNumber": { "type": "string", "description": "The project number." }, "team": { "type": "string", "description": "The team. Can be owners, editors, or viewers." } } }, "role": { "type": "string", "description": "The access permission for the entity. Can be READER or OWNER." }, "selfLink": { "type": "string", "description": "The link to this access-control entry." } } }, "ObjectAccessControls": { "id": "ObjectAccessControls", "type": "object", "description": "An access-control list.", "properties": { "items": { "type": "array", "description": "The list of items.", "items": { "type": "any" } }, "kind": { "type": "string", "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", "default": "storage#objectAccessControls" } } }, "Objects": { "id": "Objects", "type": "object", "description": "A list of objects.", "properties": { "items": { "type": "array", "description": "The list of items.", "items": { "$ref": "Object" } }, "kind": { "type": "string", "description": "The kind of item this is. For lists of objects, this is always storage#objects.", "default": "storage#objects" }, "nextPageToken": { "type": "string", "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." }, "prefixes": { "type": "array", "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", "items": { "type": "string" } } } }, "RewriteResponse": { "id": "RewriteResponse", "type": "object", "description": "A rewrite response.", "properties": { "done": { "type": "boolean", "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response." }, "kind": { "type": "string", "description": "The kind of item this is.", "default": "storage#rewriteResponse" }, "objectSize": { "type": "string", "description": "The total size of the object being copied in bytes. This property is always present in the response.", "format": "uint64" }, "resource": { "$ref": "Object", "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." }, "rewriteToken": { "type": "string", "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy." }, "totalBytesRewritten": { "type": "string", "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", "format": "uint64" } } } }, "resources": { "bucketAccessControls": { "methods": { "delete": { "id": "storage.bucketAccessControls.delete", "path": "b/{bucket}/acl/{entity}", "httpMethod": "DELETE", "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "get": { "id": "storage.bucketAccessControls.get", "path": "b/{bucket}/acl/{entity}", "httpMethod": "GET", "description": "Returns the ACL entry for the specified entity on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "response": { "$ref": "BucketAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "insert": { "id": "storage.bucketAccessControls.insert", "path": "b/{bucket}/acl", "httpMethod": "POST", "description": "Creates a new ACL entry on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket" ], "request": { "$ref": "BucketAccessControl" }, "response": { "$ref": "BucketAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "list": { "id": "storage.bucketAccessControls.list", "path": "b/{bucket}/acl", "httpMethod": "GET", "description": "Retrieves ACL entries on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket" ], "response": { "$ref": "BucketAccessControls" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "patch": { "id": "storage.bucketAccessControls.patch", "path": "b/{bucket}/acl/{entity}", "httpMethod": "PATCH", "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "request": { "$ref": "BucketAccessControl" }, "response": { "$ref": "BucketAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "update": { "id": "storage.bucketAccessControls.update", "path": "b/{bucket}/acl/{entity}", "httpMethod": "PUT", "description": "Updates an ACL entry on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "request": { "$ref": "BucketAccessControl" }, "response": { "$ref": "BucketAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] } } }, "buckets": { "methods": { "delete": { "id": "storage.buckets.delete", "path": "b/{bucket}", "httpMethod": "DELETE", "description": "Permanently deletes an empty bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "ifMetagenerationMatch": { "type": "string", "description": "If set, only deletes the bucket if its metageneration matches this value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "If set, only deletes the bucket if its metageneration does not match this value.", "format": "int64", "location": "query" } }, "parameterOrder": [ "bucket" ], "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "get": { "id": "storage.buckets.get", "path": "b/{bucket}", "httpMethod": "GET", "description": "Returns metadata for the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit acl and defaultObjectAcl properties." ], "location": "query" } }, "parameterOrder": [ "bucket" ], "response": { "$ref": "Bucket" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "insert": { "id": "storage.buckets.insert", "path": "b", "httpMethod": "POST", "description": "Creates a new bucket.", "parameters": { "predefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to this bucket.", "enum": [ "authenticatedRead", "private", "projectPrivate", "publicRead", "publicReadWrite" ], "enumDescriptions": [ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", "Project team owners get OWNER access.", "Project team members get access according to their roles.", "Project team owners get OWNER access, and allUsers get READER access.", "Project team owners get OWNER access, and allUsers get WRITER access." 
], "location": "query" }, "predefinedDefaultObjectAcl": { "type": "string", "description": "Apply a predefined set of default object access controls to this bucket.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." ], "location": "query" }, "project": { "type": "string", "description": "A valid API project identifier.", "required": true, "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit acl and defaultObjectAcl properties." ], "location": "query" } }, "parameterOrder": [ "project" ], "request": { "$ref": "Bucket" }, "response": { "$ref": "Bucket" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "list": { "id": "storage.buckets.list", "path": "b", "httpMethod": "GET", "description": "Retrieves a list of buckets for a given project.", "parameters": { "maxResults": { "type": "integer", "description": "Maximum number of buckets to return.", "format": "uint32", "minimum": "0", "location": "query" }, "pageToken": { "type": "string", "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query" }, "prefix": { "type": "string", "description": "Filter results to buckets whose names begin with this prefix.", "location": "query" }, "project": { "type": "string", "description": "A valid API project identifier.", "required": true, "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit acl and defaultObjectAcl properties." ], "location": "query" } }, "parameterOrder": [ "project" ], "response": { "$ref": "Buckets" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "patch": { "id": "storage.buckets.patch", "path": "b/{bucket}", "httpMethod": "PATCH", "description": "Updates a bucket. 
This method supports patch semantics.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "predefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to this bucket.", "enum": [ "authenticatedRead", "private", "projectPrivate", "publicRead", "publicReadWrite" ], "enumDescriptions": [ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", "Project team owners get OWNER access.", "Project team members get access according to their roles.", "Project team owners get OWNER access, and allUsers get READER access.", "Project team owners get OWNER access, and allUsers get WRITER access." ], "location": "query" }, "predefinedDefaultObjectAcl": { "type": "string", "description": "Apply a predefined set of default object access controls to this bucket.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." ], "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit acl and defaultObjectAcl properties." 
], "location": "query" } }, "parameterOrder": [ "bucket" ], "request": { "$ref": "Bucket" }, "response": { "$ref": "Bucket" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "update": { "id": "storage.buckets.update", "path": "b/{bucket}", "httpMethod": "PUT", "description": "Updates a bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "predefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to this bucket.", "enum": [ "authenticatedRead", "private", "projectPrivate", "publicRead", "publicReadWrite" ], "enumDescriptions": [ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", "Project team owners get OWNER access.", "Project team members get access according to their roles.", "Project team owners get OWNER access, and allUsers get READER access.", "Project team owners get OWNER access, and allUsers get WRITER access." ], "location": "query" }, "predefinedDefaultObjectAcl": { "type": "string", "description": "Apply a predefined set of default object access controls to this bucket.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." ], "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit acl and defaultObjectAcl properties." 
], "location": "query" } }, "parameterOrder": [ "bucket" ], "request": { "$ref": "Bucket" }, "response": { "$ref": "Bucket" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ] } } }, "channels": { "methods": { "stop": { "id": "storage.channels.stop", "path": "channels/stop", "httpMethod": "POST", "description": "Stop watching resources through this channel", "request": { "$ref": "Channel", "parameterName": "resource" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ] } } }, "defaultObjectAccessControls": { "methods": { "delete": { "id": "storage.defaultObjectAccessControls.delete", "path": "b/{bucket}/defaultObjectAcl/{entity}", "httpMethod": "DELETE", "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "get": { "id": "storage.defaultObjectAccessControls.get", "path": "b/{bucket}/defaultObjectAcl/{entity}", "httpMethod": "GET", "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "insert": { "id": "storage.defaultObjectAccessControls.insert", "path": "b/{bucket}/defaultObjectAcl", "httpMethod": "POST", "description": "Creates a new default object ACL entry on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket" ], "request": { "$ref": "ObjectAccessControl" }, "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "list": { "id": "storage.defaultObjectAccessControls.list", "path": "b/{bucket}/defaultObjectAcl", "httpMethod": "GET", "description": "Retrieves default object ACL entries on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "ifMetagenerationMatch": { "type": "string", "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", "format": "int64", "location": "query" } }, "parameterOrder": [ "bucket" ], "response": { "$ref": "ObjectAccessControls" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "patch": { "id": "storage.defaultObjectAccessControls.patch", "path": "b/{bucket}/defaultObjectAcl/{entity}", "httpMethod": "PATCH", "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "request": { "$ref": "ObjectAccessControl" }, "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "update": { "id": "storage.defaultObjectAccessControls.update", "path": "b/{bucket}/defaultObjectAcl/{entity}", "httpMethod": "PUT", "description": "Updates a default object ACL entry on the specified bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "entity" ], "request": { "$ref": "ObjectAccessControl" }, "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] } } }, "objectAccessControls": { "methods": { "delete": { "id": "storage.objectAccessControls.delete", "path": "b/{bucket}/o/{object}/acl/{entity}", "httpMethod": "DELETE", "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "object", "entity" ], "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "get": { "id": "storage.objectAccessControls.get", "path": "b/{bucket}/o/{object}/acl/{entity}", "httpMethod": "GET", "description": "Returns the ACL entry for the specified entity on the specified object.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "object", "entity" ], "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "insert": { "id": "storage.objectAccessControls.insert", "path": "b/{bucket}/o/{object}/acl", "httpMethod": "POST", "description": "Creates a new ACL entry on the specified object.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "object" ], "request": { "$ref": "ObjectAccessControl" }, "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "list": { "id": "storage.objectAccessControls.list", "path": "b/{bucket}/o/{object}/acl", "httpMethod": "GET", "description": "Retrieves ACL entries on the specified object.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "object" ], "response": { "$ref": "ObjectAccessControls" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "patch": { "id": "storage.objectAccessControls.patch", "path": "b/{bucket}/o/{object}/acl/{entity}", "httpMethod": "PATCH", "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "object", "entity" ], "request": { "$ref": "ObjectAccessControl" }, "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] }, "update": { "id": "storage.objectAccessControls.update", "path": "b/{bucket}/o/{object}/acl/{entity}", "httpMethod": "PUT", "description": "Updates an ACL entry on the specified object.", "parameters": { "bucket": { "type": "string", "description": "Name of a bucket.", "required": true, "location": "path" }, "entity": { "type": "string", "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "object", "entity" ], "request": { "$ref": "ObjectAccessControl" }, "response": { "$ref": "ObjectAccessControl" }, "scopes": [ "https://www.googleapis.com/auth/devstorage.full_control" ] } } }, "objects": { "methods": { "compose": { "id": "storage.objects.compose", "path": "b/{destinationBucket}/o/{destinationObject}/compose", "httpMethod": "POST", "description": "Concatenates a list of existing objects into a new object in the same bucket.", "parameters": { "destinationBucket": { "type": "string", "description": "Name of the bucket in which to store the new object.", "required": true, "location": "path" }, "destinationObject": { "type": "string", "description": "Name of the new object.", "required": true, "location": "path" }, "destinationPredefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to the destination object.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." 
], "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", "format": "int64", "location": "query" } }, "parameterOrder": [ "destinationBucket", "destinationObject" ], "request": { "$ref": "ComposeRequest" }, "response": { "$ref": "Object" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsMediaDownload": true }, "copy": { "id": "storage.objects.copy", "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", "httpMethod": "POST", "description": "Copies a source object to a destination object. Optionally overrides metadata.", "parameters": { "destinationBucket": { "type": "string", "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", "required": true, "location": "path" }, "destinationObject": { "type": "string", "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", "required": true, "location": "path" }, "destinationPredefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to the destination object.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." 
], "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", "format": "int64", "location": "query" }, "ifGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "ifSourceGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's generation matches the given value.", "format": "int64", "location": "query" }, "ifSourceGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", "format": "int64", "location": "query" }, "ifSourceMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifSourceMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." ], "location": "query" }, "sourceBucket": { "type": "string", "description": "Name of the bucket in which to find the source object.", "required": true, "location": "path" }, "sourceGeneration": { "type": "string", "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "sourceObject": { "type": "string", "description": "Name of the source object.", "required": true, "location": "path" } }, "parameterOrder": [ "sourceBucket", "sourceObject", "destinationBucket", "destinationObject" ], "request": { "$ref": "Object" }, "response": { "$ref": "Object" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsMediaDownload": true }, "delete": { "id": "storage.objects.delete", "path": "b/{bucket}/o/{object}", "httpMethod": "DELETE", "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", "parameters": { "bucket": { "type": "string", "description": "Name of the bucket in which the object resides.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation matches the given value.", "format": "int64", "location": "query" }, "ifGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" } }, "parameterOrder": [ "bucket", "object" ], "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "get": { "id": "storage.objects.get", "path": "b/{bucket}/o/{object}", "httpMethod": "GET", "description": "Retrieves an object or its metadata.", "parameters": { "bucket": { "type": "string", "description": "Name of the bucket in which the object resides.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's generation matches the given value.", "format": "int64", "location": "query" }, "ifGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's generation does not match the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." 
], "location": "query" } }, "parameterOrder": [ "bucket", "object" ], "response": { "$ref": "Object" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsMediaDownload": true }, "insert": { "id": "storage.objects.insert", "path": "b/{bucket}/o", "httpMethod": "POST", "description": "Stores a new object and metadata.", "parameters": { "bucket": { "type": "string", "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", "required": true, "location": "path" }, "contentEncoding": { "type": "string", "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation matches the given value.", "format": "int64", "location": "query" }, "ifGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "name": { "type": "string", "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", "location": "query" }, "predefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to this object.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." ], "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." 
], "location": "query" } }, "parameterOrder": [ "bucket" ], "request": { "$ref": "Object" }, "response": { "$ref": "Object" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsMediaDownload": true, "supportsMediaUpload": true, "mediaUpload": { "accept": [ "*/*" ], "protocols": { "simple": { "multipart": true, "path": "/upload/storage/v1/b/{bucket}/o" }, "resumable": { "multipart": true, "path": "/resumable/upload/storage/v1/b/{bucket}/o" } } } }, "list": { "id": "storage.objects.list", "path": "b/{bucket}/o", "httpMethod": "GET", "description": "Retrieves a list of objects matching the criteria.", "parameters": { "bucket": { "type": "string", "description": "Name of the bucket in which to look for objects.", "required": true, "location": "path" }, "delimiter": { "type": "string", "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", "location": "query" }, "maxResults": { "type": "integer", "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", "format": "uint32", "minimum": "0", "location": "query" }, "pageToken": { "type": "string", "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query" }, "prefix": { "type": "string", "description": "Filter results to objects whose names begin with this prefix.", "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." ], "location": "query" }, "versions": { "type": "boolean", "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", "location": "query" } }, "parameterOrder": [ "bucket" ], "response": { "$ref": "Objects" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsSubscription": true }, "patch": { "id": "storage.objects.patch", "path": "b/{bucket}/o/{object}", "httpMethod": "PATCH", "description": "Updates an object's metadata. 
This method supports patch semantics.", "parameters": { "bucket": { "type": "string", "description": "Name of the bucket in which the object resides.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation matches the given value.", "format": "int64", "location": "query" }, "ifGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" }, "predefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to this object.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." ], "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." ], "location": "query" } }, "parameterOrder": [ "bucket", "object" ], "request": { "$ref": "Object" }, "response": { "$ref": "Object" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "rewrite": { "id": "storage.objects.rewrite", "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", "httpMethod": "POST", "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", "parameters": { "destinationBucket": { "type": "string", "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", "required": true, "location": "path" }, "destinationObject": { "type": "string", "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", "required": true, "location": "path" }, "destinationPredefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to the destination object.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." ], "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", "format": "int64", "location": "query" }, "ifGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "ifSourceGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's generation matches the given value.", "format": "int64", "location": "query" }, "ifSourceGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", "format": "int64", "location": "query" }, "ifSourceMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifSourceMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "maxBytesRewrittenPerCall": { "type": "string", "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", "format": "int64", "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." 
], "location": "query" }, "rewriteToken": { "type": "string", "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", "location": "query" }, "sourceBucket": { "type": "string", "description": "Name of the bucket in which to find the source object.", "required": true, "location": "path" }, "sourceGeneration": { "type": "string", "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "sourceObject": { "type": "string", "description": "Name of the source object.", "required": true, "location": "path" } }, "parameterOrder": [ "sourceBucket", "sourceObject", "destinationBucket", "destinationObject" ], "request": { "$ref": "Object" }, "response": { "$ref": "RewriteResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ] }, "update": { "id": "storage.objects.update", "path": "b/{bucket}/o/{object}", "httpMethod": "PUT", "description": "Updates an object's metadata.", "parameters": { "bucket": { "type": "string", "description": "Name of the bucket in which the object resides.", "required": true, "location": "path" }, "generation": { "type": "string", "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", "format": "int64", "location": "query" }, "ifGenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation matches the given value.", "format": "int64", "location": "query" }, "ifGenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", "format": "int64", "location": "query" }, "ifMetagenerationMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", "format": "int64", "location": "query" }, "ifMetagenerationNotMatch": { "type": "string", "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", "format": "int64", "location": "query" }, "object": { "type": "string", "description": "Name of the object.", "required": true, "location": "path" }, "predefinedAcl": { "type": "string", "description": "Apply a predefined set of access controls to this object.", "enum": [ "authenticatedRead", "bucketOwnerFullControl", "bucketOwnerRead", "private", "projectPrivate", "publicRead" ], "enumDescriptions": [ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", "Object owner gets OWNER access, and project team owners get OWNER access.", "Object owner gets OWNER access, and project team owners get READER access.", "Object owner gets OWNER access.", "Object owner gets OWNER access, and project team members get access according to their roles.", "Object owner gets OWNER access, and allUsers get READER access." ], "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. 
Defaults to full.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." ], "location": "query" } }, "parameterOrder": [ "bucket", "object" ], "request": { "$ref": "Object" }, "response": { "$ref": "Object" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsMediaDownload": true }, "watchAll": { "id": "storage.objects.watchAll", "path": "b/{bucket}/o/watch", "httpMethod": "POST", "description": "Watch for changes on all objects in a bucket.", "parameters": { "bucket": { "type": "string", "description": "Name of the bucket in which to look for objects.", "required": true, "location": "path" }, "delimiter": { "type": "string", "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", "location": "query" }, "maxResults": { "type": "integer", "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", "format": "uint32", "minimum": "0", "location": "query" }, "pageToken": { "type": "string", "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query" }, "prefix": { "type": "string", "description": "Filter results to objects whose names begin with this prefix.", "location": "query" }, "projection": { "type": "string", "description": "Set of properties to return. Defaults to noAcl.", "enum": [ "full", "noAcl" ], "enumDescriptions": [ "Include all properties.", "Omit the acl property." ], "location": "query" }, "versions": { "type": "boolean", "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", "location": "query" } }, "parameterOrder": [ "bucket" ], "request": { "$ref": "Channel", "parameterName": "resource" }, "response": { "$ref": "Channel" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsSubscription": true } } } } } ================================================ FILE: vendor/google.golang.org/api/storage/v1/storage-gen.go ================================================ // Package storage provides access to the Cloud Storage API. // // See https://developers.google.com/storage/docs/json_api/ // // Usage example: // // import "google.golang.org/api/storage/v1" // ... // storageService, err := storage.New(oauthHttpClient) package storage import ( "bytes" "encoding/json" "errors" "fmt" "golang.org/x/net/context" "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" ) // Always reference these packages, just in case the auto-generated code // below doesn't. 
var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Background const apiId = "storage:v1" const apiName = "storage" const apiVersion = "v1" const basePath = "https://www.googleapis.com/storage/v1/" // OAuth2 scopes used by this API. const ( // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // Manage your data and permissions in Google Cloud Storage DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" // View your data in Google Cloud Storage DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" // Manage your data in Google Cloud Storage DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" ) func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.BucketAccessControls = NewBucketAccessControlsService(s) s.Buckets = NewBucketsService(s) s.Channels = NewChannelsService(s) s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) s.ObjectAccessControls = NewObjectAccessControlsService(s) s.Objects = NewObjectsService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment BucketAccessControls *BucketAccessControlsService Buckets *BucketsService Channels *ChannelsService DefaultObjectAccessControls *DefaultObjectAccessControlsService ObjectAccessControls *ObjectAccessControlsService Objects *ObjectsService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { rs := &BucketAccessControlsService{s: s} return rs } type BucketAccessControlsService struct { s *Service } func NewBucketsService(s *Service) *BucketsService { rs := &BucketsService{s: s} return rs } type BucketsService struct { s *Service } func NewChannelsService(s *Service) *ChannelsService { rs := &ChannelsService{s: s} return rs } type ChannelsService struct { s *Service } func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { rs := &DefaultObjectAccessControlsService{s: s} return rs } type DefaultObjectAccessControlsService struct { s *Service } func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { rs := &ObjectAccessControlsService{s: s} return rs } type ObjectAccessControlsService struct { s *Service } func NewObjectsService(s *Service) *ObjectsService { rs := &ObjectsService{s: s} return rs } type ObjectsService struct { s *Service } type Bucket struct { // Acl: Access controls on the bucket. Acl []*BucketAccessControl `json:"acl,omitempty"` // Cors: The bucket's Cross-Origin Resource Sharing (CORS) // configuration. Cors []*BucketCors `json:"cors,omitempty"` // DefaultObjectAcl: Default access controls to apply to new objects // when no ACL is provided. DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` // Etag: HTTP 1.1 Entity tag for the bucket. Etag string `json:"etag,omitempty"` // Id: The ID of the bucket. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For buckets, this is always // storage#bucket. 
Kind string `json:"kind,omitempty"` // Lifecycle: The bucket's lifecycle configuration. See lifecycle // management for more information. Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` // Location: The location of the bucket. Object data for objects in the // bucket resides in physical storage within this region. Defaults to // US. See the developer's guide for the authoritative list. Location string `json:"location,omitempty"` // Logging: The bucket's logging configuration, which defines the // destination bucket and optional name prefix for the current bucket's // logs. Logging *BucketLogging `json:"logging,omitempty"` // Metageneration: The metadata generation of this bucket. Metageneration int64 `json:"metageneration,omitempty,string"` // Name: The name of the bucket. Name string `json:"name,omitempty"` // Owner: The owner of the bucket. This is always the project team's // owner group. Owner *BucketOwner `json:"owner,omitempty"` // ProjectNumber: The project number of the project the bucket belongs // to. ProjectNumber uint64 `json:"projectNumber,omitempty,string"` // SelfLink: The URI of this bucket. SelfLink string `json:"selfLink,omitempty"` // StorageClass: The bucket's storage class. This defines how objects in // the bucket are stored and determines the SLA and the cost of storage. // Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. // Defaults to STANDARD. For more information, see storage classes. StorageClass string `json:"storageClass,omitempty"` // TimeCreated: Creation time of the bucket in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` // Versioning: The bucket's versioning configuration. Versioning *BucketVersioning `json:"versioning,omitempty"` // Website: The bucket's website configuration. Website *BucketWebsite `json:"website,omitempty"` } type BucketCors struct { // MaxAgeSeconds: The value, in seconds, to return in the // Access-Control-Max-Age header used in preflight responses. MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` // Method: The list of HTTP methods on which to include CORS response // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list // of methods, and means "any method". Method []string `json:"method,omitempty"` // Origin: The list of Origins eligible to receive CORS response // headers. Note: "*" is permitted in the list of origins, and means // "any Origin". Origin []string `json:"origin,omitempty"` // ResponseHeader: The list of HTTP headers other than the simple // response headers to give permission for the user-agent to share // across domains. ResponseHeader []string `json:"responseHeader,omitempty"` } type BucketLifecycle struct { // Rule: A lifecycle management rule, which is made of an action to take // and the condition(s) under which the action will be taken. Rule []*BucketLifecycleRule `json:"rule,omitempty"` } type BucketLifecycleRule struct { // Action: The action to take. Action *BucketLifecycleRuleAction `json:"action,omitempty"` // Condition: The condition(s) under which the action will be taken. Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` } type BucketLifecycleRuleAction struct { // Type: Type of the action. Currently, only Delete is supported. Type string `json:"type,omitempty"` } type BucketLifecycleRuleCondition struct { // Age: Age of an object (in days). This condition is satisfied when an // object reaches the specified age. 
Age int64 `json:"age,omitempty"` // CreatedBefore: A date in RFC 3339 format with only the date part (for // instance, "2013-01-15"). This condition is satisfied when an object // is created before midnight of the specified date in UTC. CreatedBefore string `json:"createdBefore,omitempty"` // IsLive: Relevant only for versioned objects. If the value is true, // this condition matches live objects; if the value is false, it // matches archived objects. IsLive bool `json:"isLive,omitempty"` // NumNewerVersions: Relevant only for versioned objects. If the value // is N, this condition is satisfied when there are at least N versions // (including the live version) newer than this version of the object. NumNewerVersions int64 `json:"numNewerVersions,omitempty"` } type BucketLogging struct { // LogBucket: The destination bucket where the current bucket's logs // should be placed. LogBucket string `json:"logBucket,omitempty"` // LogObjectPrefix: A prefix for log object names. LogObjectPrefix string `json:"logObjectPrefix,omitempty"` } type BucketOwner struct { // Entity: The entity, in the form project-owner-projectId. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity. EntityId string `json:"entityId,omitempty"` } type BucketVersioning struct { // Enabled: While set to true, versioning is fully enabled for this // bucket. Enabled bool `json:"enabled,omitempty"` } type BucketWebsite struct { // MainPageSuffix: Behaves as the bucket's directory index where missing // objects are treated as potential directories. MainPageSuffix string `json:"mainPageSuffix,omitempty"` // NotFoundPage: The custom object to return when a requested resource // is not found. NotFoundPage string `json:"notFoundPage,omitempty"` } type BucketAccessControl struct { // Bucket: The name of the bucket. Bucket string `json:"bucket,omitempty"` // Domain: The domain associated with the entity, if any. Domain string `json:"domain,omitempty"` // Email: The email address associated with the entity, if any. Email string `json:"email,omitempty"` // Entity: The entity holding the permission, in one of the following // forms: // - user-userId // - user-email // - group-groupId // - group-email // - domain-domain // - project-team-projectId // - allUsers // - allAuthenticatedUsers Examples: // - The user liz@example.com would be user-liz@example.com. // - The group example@googlegroups.com would be // group-example@googlegroups.com. // - To refer to all members of the Google Apps for Business domain // example.com, the entity would be domain-example.com. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity, if any. EntityId string `json:"entityId,omitempty"` // Etag: HTTP 1.1 Entity tag for the access-control entry. Etag string `json:"etag,omitempty"` // Id: The ID of the access-control entry. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For bucket access control entries, // this is always storage#bucketAccessControl. Kind string `json:"kind,omitempty"` // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` // Role: The access permission for the entity. Can be READER, WRITER, or // OWNER. Role string `json:"role,omitempty"` // SelfLink: The link to this access-control entry. SelfLink string `json:"selfLink,omitempty"` } type BucketAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` // Team: The team. 
Can be owners, editors, or viewers. Team string `json:"team,omitempty"` } type BucketAccessControls struct { // Items: The list of items. Items []*BucketAccessControl `json:"items,omitempty"` // Kind: The kind of item this is. For lists of bucket access control // entries, this is always storage#bucketAccessControls. Kind string `json:"kind,omitempty"` } type Buckets struct { // Items: The list of items. Items []*Bucket `json:"items,omitempty"` // Kind: The kind of item this is. For lists of buckets, this is always // storage#buckets. Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large // result sets. Provide this value in a subsequent request to return the // next page of results. NextPageToken string `json:"nextPageToken,omitempty"` } type Channel struct { // Address: The address where notifications are delivered for this // channel. Address string `json:"address,omitempty"` // Expiration: Date and time of notification channel expiration, // expressed as a Unix timestamp, in milliseconds. Optional. Expiration int64 `json:"expiration,omitempty,string"` // Id: A UUID or similar unique string that identifies this channel. Id string `json:"id,omitempty"` // Kind: Identifies this as a notification channel used to watch for // changes to a resource. Value: the fixed string "api#channel". Kind string `json:"kind,omitempty"` // Params: Additional parameters controlling delivery channel behavior. // Optional. Params map[string]string `json:"params,omitempty"` // Payload: A Boolean value to indicate whether payload is wanted. // Optional. Payload bool `json:"payload,omitempty"` // ResourceId: An opaque ID that identifies the resource being watched // on this channel. Stable across different API versions. ResourceId string `json:"resourceId,omitempty"` // ResourceUri: A version-specific identifier for the watched resource. ResourceUri string `json:"resourceUri,omitempty"` // Token: An arbitrary string delivered to the target address with each // notification delivered over this channel. Optional. Token string `json:"token,omitempty"` // Type: The type of delivery mechanism used for this channel. Type string `json:"type,omitempty"` } type ComposeRequest struct { // Destination: Properties of the resulting object. Destination *Object `json:"destination,omitempty"` // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` // SourceObjects: The list of source objects that will be concatenated // into a single object. SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` } type ComposeRequestSourceObjects struct { // Generation: The generation of this object to use as the source. Generation int64 `json:"generation,omitempty,string"` // Name: The source object's name. The source object's bucket is // implicitly the destination bucket. Name string `json:"name,omitempty"` // ObjectPreconditions: Conditions that must be met for this operation // to execute. ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` } type ComposeRequestSourceObjectsObjectPreconditions struct { // IfGenerationMatch: Only perform the composition if the generation of // the source object that would be used matches this value. If this // value and a generation are both specified, they must be the same // value or the call will fail. IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` } type Object struct { // Acl: Access controls on the object. 
Acl []*ObjectAccessControl `json:"acl,omitempty"` // Bucket: The name of the bucket containing this object. Bucket string `json:"bucket,omitempty"` // CacheControl: Cache-Control directive for the object data. CacheControl string `json:"cacheControl,omitempty"` // ComponentCount: Number of underlying components that make up this // object. Components are accumulated by compose operations. ComponentCount int64 `json:"componentCount,omitempty"` // ContentDisposition: Content-Disposition of the object data. ContentDisposition string `json:"contentDisposition,omitempty"` // ContentEncoding: Content-Encoding of the object data. ContentEncoding string `json:"contentEncoding,omitempty"` // ContentLanguage: Content-Language of the object data. ContentLanguage string `json:"contentLanguage,omitempty"` // ContentType: Content-Type of the object data. ContentType string `json:"contentType,omitempty"` // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; // encoded using base64 in big-endian byte order. Crc32c string `json:"crc32c,omitempty"` // Etag: HTTP 1.1 Entity tag for the object. Etag string `json:"etag,omitempty"` // Generation: The content generation of this object. Used for object // versioning. Generation int64 `json:"generation,omitempty,string"` // Id: The ID of the object. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For objects, this is always // storage#object. Kind string `json:"kind,omitempty"` // Md5Hash: MD5 hash of the data; encoded using base64. Md5Hash string `json:"md5Hash,omitempty"` // MediaLink: Media download link. MediaLink string `json:"mediaLink,omitempty"` // Metadata: User-provided metadata, in key/value pairs. Metadata map[string]string `json:"metadata,omitempty"` // Metageneration: The version of the metadata for this object at this // generation. Used for preconditions and for detecting changes in // metadata. A metageneration number is only meaningful in the context // of a particular generation of a particular object. Metageneration int64 `json:"metageneration,omitempty,string"` // Name: The name of this object. Required if not specified by URL // parameter. Name string `json:"name,omitempty"` // Owner: The owner of the object. This will always be the uploader of // the object. Owner *ObjectOwner `json:"owner,omitempty"` // SelfLink: The link to this object. SelfLink string `json:"selfLink,omitempty"` // Size: Content-Length of the data in bytes. Size uint64 `json:"size,omitempty,string"` // StorageClass: Storage class of the object. StorageClass string `json:"storageClass,omitempty"` // TimeDeleted: The deletion time of the object in RFC 3339 format. Will // be returned if and only if this version of the object has been // deleted. TimeDeleted string `json:"timeDeleted,omitempty"` // Updated: The creation or modification time of the object in RFC 3339 // format. For buckets with versioning enabled, changing an object's // metadata does not change this property. Updated string `json:"updated,omitempty"` } type ObjectOwner struct { // Entity: The entity, in the form user-userId. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity. EntityId string `json:"entityId,omitempty"` } type ObjectAccessControl struct { // Bucket: The name of the bucket. Bucket string `json:"bucket,omitempty"` // Domain: The domain associated with the entity, if any. Domain string `json:"domain,omitempty"` // Email: The email address associated with the entity, if any. 
Email string `json:"email,omitempty"` // Entity: The entity holding the permission, in one of the following // forms: // - user-userId // - user-email // - group-groupId // - group-email // - domain-domain // - project-team-projectId // - allUsers // - allAuthenticatedUsers Examples: // - The user liz@example.com would be user-liz@example.com. // - The group example@googlegroups.com would be // group-example@googlegroups.com. // - To refer to all members of the Google Apps for Business domain // example.com, the entity would be domain-example.com. Entity string `json:"entity,omitempty"` // EntityId: The ID for the entity, if any. EntityId string `json:"entityId,omitempty"` // Etag: HTTP 1.1 Entity tag for the access-control entry. Etag string `json:"etag,omitempty"` // Generation: The content generation of the object. Generation int64 `json:"generation,omitempty,string"` // Id: The ID of the access-control entry. Id string `json:"id,omitempty"` // Kind: The kind of item this is. For object access control entries, // this is always storage#objectAccessControl. Kind string `json:"kind,omitempty"` // Object: The name of the object. Object string `json:"object,omitempty"` // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` // Role: The access permission for the entity. Can be READER or OWNER. Role string `json:"role,omitempty"` // SelfLink: The link to this access-control entry. SelfLink string `json:"selfLink,omitempty"` } type ObjectAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` // Team: The team. Can be owners, editors, or viewers. Team string `json:"team,omitempty"` } type ObjectAccessControls struct { // Items: The list of items. Items []interface{} `json:"items,omitempty"` // Kind: The kind of item this is. For lists of object access control // entries, this is always storage#objectAccessControls. Kind string `json:"kind,omitempty"` } type Objects struct { // Items: The list of items. Items []*Object `json:"items,omitempty"` // Kind: The kind of item this is. For lists of objects, this is always // storage#objects. Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large // result sets. Provide this value in a subsequent request to return the // next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Prefixes: The list of prefixes of objects matching-but-not-listed up // to and including the requested delimiter. Prefixes []string `json:"prefixes,omitempty"` } type RewriteResponse struct { // Done: true if the copy is finished; otherwise, false if the copy is // in progress. This property is always present in the response. Done bool `json:"done,omitempty"` // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` // ObjectSize: The total size of the object being copied in bytes. This // property is always present in the response. ObjectSize uint64 `json:"objectSize,omitempty,string"` // Resource: A resource containing the metadata for the copied-to // object. This property is present in the response only when copying // completes. Resource *Object `json:"resource,omitempty"` // RewriteToken: A token to use in subsequent requests to continue // copying data. This token is present in the response only when there // is more data to copy. 
RewriteToken string `json:"rewriteToken,omitempty"` // TotalBytesRewritten: The total bytes written so far, which can be // used to provide a waiting user with a progress indicator. This // property is always present in the response. TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"` } // method id "storage.bucketAccessControls.delete": type BucketAccessControlsDeleteCall struct { s *Service bucket string entity string opt_ map[string]interface{} } // Delete: Permanently deletes the ACL entry for the specified entity on // the specified bucket. func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { c := &BucketAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketAccessControlsDeleteCall) Do() error { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", // "httpMethod": "DELETE", // "id": "storage.bucketAccessControls.delete", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.bucketAccessControls.get": type BucketAccessControlsGetCall struct { s *Service bucket string entity string opt_ map[string]interface{} } // Get: Returns the ACL entry for the specified entity on the specified // bucket. func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { c := &BucketAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
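// Partial-response sketch for the Fields option defined on each call type
// (hedged: the bucket and entity values are illustrative). Fields restricts
// the JSON the server returns to the named properties via the standard
// "fields" query parameter set in Do():
//
//	acl, err := svc.BucketAccessControls.Get("example-bucket", "allUsers").
//		Fields("entity", "role").
//		Do()
//	if err != nil {
//		// CheckResponse surfaces non-2xx replies as an error value.
//		log.Fatal(err)
//	}
//	fmt.Println(acl.Entity, acl.Role)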
func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketAccessControlsGetCall) Do() (*BucketAccessControl, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *BucketAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Returns the ACL entry for the specified entity on the specified bucket.", // "httpMethod": "GET", // "id": "storage.bucketAccessControls.get", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl/{entity}", // "response": { // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.bucketAccessControls.insert": type BucketAccessControlsInsertCall struct { s *Service bucket string bucketaccesscontrol *BucketAccessControl opt_ map[string]interface{} } // Insert: Creates a new ACL entry on the specified bucket. func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { c := &BucketAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.bucketaccesscontrol = bucketaccesscontrol return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketAccessControlsInsertCall) Do() (*BucketAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *BucketAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new ACL entry on the specified bucket.", // "httpMethod": "POST", // "id": "storage.bucketAccessControls.insert", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl", // "request": { // "$ref": "BucketAccessControl" // }, // "response": { // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.bucketAccessControls.list": type BucketAccessControlsListCall struct { s *Service bucket string opt_ map[string]interface{} } // List: Retrieves ACL entries on the specified bucket. func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { c := &BucketAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketAccessControlsListCall) Do() (*BucketAccessControls, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *BucketAccessControls if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves ACL entries on the specified bucket.", // "httpMethod": "GET", // "id": "storage.bucketAccessControls.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl", // "response": { // "$ref": "BucketAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.bucketAccessControls.patch": type BucketAccessControlsPatchCall struct { s *Service bucket string entity string bucketaccesscontrol *BucketAccessControl opt_ map[string]interface{} } // Patch: Updates an ACL entry on the specified bucket. This method // supports patch semantics. 
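// Editorial sketch (not generated code): granting READER access on a bucket
// with the Insert builder above and then listing every ACL entry with List.
// It assumes the BucketAccessControl struct defined earlier in this file has
// Entity and Role fields analogous to ObjectAccessControl above; the bucket
// name is hypothetical.
func exampleBucketACLGrantAndList(r *BucketAccessControlsService) (*BucketAccessControls, error) {
	acl := &BucketAccessControl{
		Entity: "allAuthenticatedUsers", // one of the entity forms documented above
		Role:   "READER",
	}
	if _, err := r.Insert("example-bucket", acl).Do(); err != nil {
		return nil, err
	}
	return r.List("example-bucket").Do()
}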
func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { c := &BucketAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity c.bucketaccesscontrol = bucketaccesscontrol return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketAccessControlsPatchCall) Do() (*BucketAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *BucketAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.bucketAccessControls.patch", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl/{entity}", // "request": { // "$ref": "BucketAccessControl" // }, // "response": { // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.bucketAccessControls.update": type BucketAccessControlsUpdateCall struct { s *Service bucket string entity string bucketaccesscontrol *BucketAccessControl opt_ map[string]interface{} } // Update: Updates an ACL entry on the specified bucket. func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { c := &BucketAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity c.bucketaccesscontrol = bucketaccesscontrol return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketAccessControlsUpdateCall) Do() (*BucketAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *BucketAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified bucket.", // "httpMethod": "PUT", // "id": "storage.bucketAccessControls.update", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/acl/{entity}", // "request": { // "$ref": "BucketAccessControl" // }, // "response": { // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.buckets.delete": type BucketsDeleteCall struct { s *Service bucket string opt_ map[string]interface{} } // Delete: Permanently deletes an empty bucket. func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { c := &BucketsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": If set, only deletes the bucket if its // metageneration matches this value. func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": If set, only deletes the bucket if its // metageneration does not match this value. func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
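// Editorial sketch (not generated code): promoting an existing bucket ACL
// entry to OWNER with the Update builder above. Patch, shown earlier, takes
// the same arguments but applies patch semantics. Assumes the same
// Entity/Role fields on BucketAccessControl; all names are hypothetical.
func exampleBucketACLPromote(r *BucketAccessControlsService) (*BucketAccessControl, error) {
	return r.Update("example-bucket", "user-liz@example.com", &BucketAccessControl{
		Entity: "user-liz@example.com",
		Role:   "OWNER",
	}).Do()
}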
func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketsDeleteCall) Do() error { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Permanently deletes an empty bucket.", // "httpMethod": "DELETE", // "id": "storage.buckets.delete", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "If set, only deletes the bucket if its metageneration matches this value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "If set, only deletes the bucket if its metageneration does not match this value.", // "format": "int64", // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.get": type BucketsGetCall struct { s *Service bucket string opt_ map[string]interface{} } // Get: Returns metadata for the specified bucket. func (r *BucketsService) Get(bucket string) *BucketsGetCall { c := &BucketsGetCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
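// Editorial sketch (not generated code): the Buckets delete and get builders
// above support metageneration preconditions and projections. The bucket name
// is hypothetical; the metageneration value would come from an earlier read.
func exampleConditionalBucketDelete(r *BucketsService, metageneration int64) error {
	// Only delete if nobody has changed the bucket's metadata since the
	// metageneration value was observed.
	return r.Delete("example-bucket").IfMetagenerationMatch(metageneration).Do()
}

func exampleGetBucketWithACLs(r *BucketsService) (*Bucket, error) {
	// Request the full projection so acl and defaultObjectAcl are included.
	return r.Get("example-bucket").Projection("full").Do()
}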
func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketsGetCall) Do() (*Bucket, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Bucket if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Returns metadata for the specified bucket.", // "httpMethod": "GET", // "id": "storage.buckets.get", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.insert": type BucketsInsertCall struct { s *Service projectid string bucket *Bucket opt_ map[string]interface{} } // Insert: Creates a new bucket. func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { c := &BucketsInsertCall{s: r.s, opt_: make(map[string]interface{})} c.projectid = projectid c.bucket = bucket return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this bucket. // // Possible values: // "authenticatedRead" - Project team owners get OWNER access, and // allAuthenticatedUsers get READER access. // "private" - Project team owners get OWNER access. // "projectPrivate" - Project team members get access according to // their roles. // "publicRead" - Project team owners get OWNER access, and allUsers // get READER access. 
// "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { c.opt_["predefinedAcl"] = predefinedAcl return c } // PredefinedDefaultObjectAcl sets the optional parameter // "predefinedDefaultObjectAcl": Apply a predefined set of default // object access controls to this bucket. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl, unless the bucket resource // specifies acl or defaultObjectAcl properties, when it defaults to // full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketsInsertCall) Do() (*Bucket, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") params.Set("project", fmt.Sprintf("%v", c.projectid)) if v, ok := c.opt_["predefinedAcl"]; ok { params.Set("predefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.SetOpaque(req.URL) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Bucket if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new bucket.", // "httpMethod": "POST", // "id": "storage.buckets.insert", // "parameterOrder": [ // "project" // ], // "parameters": { // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this bucket.", // "enum": [ // "authenticatedRead", // "private", // "projectPrivate", // "publicRead", // "publicReadWrite" // ], // "enumDescriptions": [ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", // "Project team owners get OWNER access.", // "Project team members get access according to their roles.", // "Project team owners get OWNER access, and allUsers get READER access.", // "Project team owners get OWNER access, and allUsers get WRITER access." // ], // "location": "query", // "type": "string" // }, // "predefinedDefaultObjectAcl": { // "description": "Apply a predefined set of default object access controls to this bucket.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "project": { // "description": "A valid API project identifier.", // "location": "query", // "required": true, // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" // } // }, // "path": "b", // "request": { // "$ref": "Bucket" // }, // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.list": type BucketsListCall struct { s *Service projectid string opt_ map[string]interface{} } // List: Retrieves a list of buckets for a given project. func (r *BucketsService) List(projectid string) *BucketsListCall { c := &BucketsListCall{s: r.s, opt_: make(map[string]interface{})} c.projectid = projectid return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of buckets to return. 
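// Editorial sketch (not generated code): creating a bucket with the Insert
// builder above and applying the "private" predefined ACL described in the
// option comments. Assumes the Bucket struct defined earlier in this file has
// a Name field; the project id and bucket name are hypothetical.
func exampleCreateBucket(r *BucketsService, projectID string) (*Bucket, error) {
	return r.Insert(projectID, &Bucket{Name: "example-bucket"}).
		PredefinedAcl("private").
		Projection("noAcl").
		Do()
}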
func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { c.opt_["pageToken"] = pageToken return c } // Prefix sets the optional parameter "prefix": Filter results to // buckets whose names begin with this prefix. func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { c.opt_["prefix"] = prefix return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsListCall) Projection(projection string) *BucketsListCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketsListCall) Do() (*Buckets, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") params.Set("project", fmt.Sprintf("%v", c.projectid)) if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["prefix"]; ok { params.Set("prefix", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.SetOpaque(req.URL) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Buckets if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves a list of buckets for a given project.", // "httpMethod": "GET", // "id": "storage.buckets.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "maxResults": { // "description": "Maximum number of buckets to return.", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "pageToken": { // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, // "prefix": { // "description": "Filter results to buckets whose names begin with this prefix.", // "location": "query", // "type": "string" // }, // "project": { // "description": "A valid API project identifier.", // "location": "query", // "required": true, // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit acl and defaultObjectAcl properties." 
// ], // "location": "query", // "type": "string" // } // }, // "path": "b", // "response": { // "$ref": "Buckets" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.patch": type BucketsPatchCall struct { s *Service bucket string bucket2 *Bucket opt_ map[string]interface{} } // Patch: Updates a bucket. This method supports patch semantics. func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { c := &BucketsPatchCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.bucket2 = bucket2 return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this bucket. // // Possible values: // "authenticatedRead" - Project team owners get OWNER access, and // allAuthenticatedUsers get READER access. // "private" - Project team owners get OWNER access. // "projectPrivate" - Project team members get access according to // their roles. // "publicRead" - Project team owners get OWNER access, and allUsers // get READER access. // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { c.opt_["predefinedAcl"] = predefinedAcl return c } // PredefinedDefaultObjectAcl sets the optional parameter // "predefinedDefaultObjectAcl": Apply a predefined set of default // object access controls to this bucket. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. 
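// Editorial sketch (not generated code): paging through all buckets in a
// project with the List builder above. Assumes the Buckets response type
// defined earlier in this file carries Items and NextPageToken fields, in the
// same way the Objects type above does.
func exampleListAllBuckets(r *BucketsService, projectID string) ([]*Bucket, error) {
	var all []*Bucket
	pageToken := ""
	for {
		call := r.List(projectID).MaxResults(100)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		pageToken = page.NextPageToken
	}
}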
func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketsPatchCall) Do() (*Bucket, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedAcl"]; ok { params.Set("predefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + params.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Bucket if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates a bucket. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.buckets.patch", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this bucket.", // "enum": [ // "authenticatedRead", // "private", // "projectPrivate", // "publicRead", // "publicReadWrite" // ], // "enumDescriptions": [ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", // "Project team owners get OWNER access.", // "Project team members get access according to their roles.", // "Project team owners get OWNER access, and allUsers get READER access.", // "Project team owners get OWNER access, and allUsers get WRITER access." 
// ], // "location": "query", // "type": "string" // }, // "predefinedDefaultObjectAcl": { // "description": "Apply a predefined set of default object access controls to this bucket.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "request": { // "$ref": "Bucket" // }, // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.buckets.update": type BucketsUpdateCall struct { s *Service bucket string bucket2 *Bucket opt_ map[string]interface{} } // Update: Updates a bucket. func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { c := &BucketsUpdateCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.bucket2 = bucket2 return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this bucket. // // Possible values: // "authenticatedRead" - Project team owners get OWNER access, and // allAuthenticatedUsers get READER access. // "private" - Project team owners get OWNER access. // "projectPrivate" - Project team members get access according to // their roles. // "publicRead" - Project team owners get OWNER access, and allUsers // get READER access. // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { c.opt_["predefinedAcl"] = predefinedAcl return c } // PredefinedDefaultObjectAcl sets the optional parameter // "predefinedDefaultObjectAcl": Apply a predefined set of default // object access controls to this bucket. 
// // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *BucketsUpdateCall) Do() (*Bucket, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedAcl"]; ok { params.Set("predefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Bucket if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates a bucket.", // "httpMethod": "PUT", // "id": "storage.buckets.update", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this bucket.", // "enum": [ // "authenticatedRead", // "private", // "projectPrivate", // "publicRead", // "publicReadWrite" // ], // "enumDescriptions": [ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", // "Project team owners get OWNER access.", // "Project team members get access according to their roles.", // "Project team owners get OWNER access, and allUsers get READER access.", // "Project team owners get OWNER access, and allUsers get WRITER access." // ], // "location": "query", // "type": "string" // }, // "predefinedDefaultObjectAcl": { // "description": "Apply a predefined set of default object access controls to this bucket.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit acl and defaultObjectAcl properties." 
// ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}", // "request": { // "$ref": "Bucket" // }, // "response": { // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.channels.stop": type ChannelsStopCall struct { s *Service channel *Channel opt_ map[string]interface{} } // Stop: Stop watching resources through this channel func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { c := &ChannelsStopCall{s: r.s, opt_: make(map[string]interface{})} c.channel = channel return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ChannelsStopCall) Do() error { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.SetOpaque(req.URL) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Stop watching resources through this channel", // "httpMethod": "POST", // "id": "storage.channels.stop", // "path": "channels/stop", // "request": { // "$ref": "Channel", // "parameterName": "resource" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.defaultObjectAccessControls.delete": type DefaultObjectAccessControlsDeleteCall struct { s *Service bucket string entity string opt_ map[string]interface{} } // Delete: Permanently deletes the default object ACL entry for the // specified entity on the specified bucket. func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { c := &DefaultObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DefaultObjectAccessControlsDeleteCall) Do() error { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", // "httpMethod": "DELETE", // "id": "storage.defaultObjectAccessControls.delete", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.get": type DefaultObjectAccessControlsGetCall struct { s *Service bucket string entity string opt_ map[string]interface{} } // Get: Returns the default object ACL entry for the specified entity on // the specified bucket. func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { c := &DefaultObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DefaultObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", // "httpMethod": "GET", // "id": "storage.defaultObjectAccessControls.get", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.insert": type DefaultObjectAccessControlsInsertCall struct { s *Service bucket string objectaccesscontrol *ObjectAccessControl opt_ map[string]interface{} } // Insert: Creates a new default object ACL entry on the specified // bucket. func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { c := &DefaultObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.objectaccesscontrol = objectaccesscontrol return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DefaultObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new default object ACL entry on the specified bucket.", // "httpMethod": "POST", // "id": "storage.defaultObjectAccessControls.insert", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.list": type DefaultObjectAccessControlsListCall struct { s *Service bucket string opt_ map[string]interface{} } // List: Retrieves default object ACL entries on the specified bucket. func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { c := &DefaultObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": If present, only return default ACL listing // if the bucket's current metageneration matches this value. 
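// Editorial sketch (not generated code): using the Insert and List builders
// above to grant a default READER entry for all authenticated users on new
// objects, then reading back the bucket's default object ACL. The
// ObjectAccessControl fields used here are the ones documented near the top
// of this section; the bucket name is hypothetical.
func exampleDefaultObjectACL(r *DefaultObjectAccessControlsService) (*ObjectAccessControls, error) {
	entry := &ObjectAccessControl{
		Entity: "allAuthenticatedUsers",
		Role:   "READER",
	}
	if _, err := r.Insert("example-bucket", entry).Do(); err != nil {
		return nil, err
	}
	return r.List("example-bucket").Do()
}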
func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": If present, only return default ACL // listing if the bucket's current metageneration does not match the // given value. func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DefaultObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControls if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves default object ACL entries on the specified bucket.", // "httpMethod": "GET", // "id": "storage.defaultObjectAccessControls.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl", // "response": { // "$ref": "ObjectAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.patch": type DefaultObjectAccessControlsPatchCall struct { s *Service bucket string entity string objectaccesscontrol *ObjectAccessControl opt_ map[string]interface{} } // Patch: Updates a default object ACL entry on the specified bucket. // This method supports patch semantics. 
func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { c := &DefaultObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DefaultObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.defaultObjectAccessControls.patch", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.defaultObjectAccessControls.update": type DefaultObjectAccessControlsUpdateCall struct { s *Service bucket string entity string objectaccesscontrol *ObjectAccessControl opt_ map[string]interface{} } // Update: Updates a default object ACL entry on the specified bucket. func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { c := &DefaultObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
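// ---------------------------------------------------------------------------
// Editor's note: the short sketches interleaved below are illustrative usage
// examples written from a consumer's point of view; they are not part of the
// generated file. They assume this package is imported as `storage`, that the
// constructor storage.New(*http.Client), the Service fields
// (DefaultObjectAccessControls, ObjectAccessControls, Objects) and the
// ObjectAccessControl struct fields Entity/Role are defined earlier in this
// file as usual for this generated client, and that the *http.Client passed in
// already carries OAuth2 credentials with the devstorage.full_control scope.
// ---------------------------------------------------------------------------

// Grant READER on future objects to all authenticated users, then list the
// bucket's default object ACL only if its metageneration is unchanged.
func exampleDefaultObjectACL(httpClient *http.Client) error {
	svc, err := storage.New(httpClient)
	if err != nil {
		return err
	}
	acl := &storage.ObjectAccessControl{Entity: "allAuthenticatedUsers", Role: "READER"}
	if _, err := svc.DefaultObjectAccessControls.Insert("example-bucket", acl).Do(); err != nil {
		return err
	}
	// 3 is a hypothetical metageneration observed by an earlier bucket lookup.
	acls, err := svc.DefaultObjectAccessControls.List("example-bucket").
		IfMetagenerationMatch(3).
		Do()
	if err != nil {
		return err
	}
	_ = acls // the returned ObjectAccessControls value is left to the caller to inspect
	return nil
}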
func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *DefaultObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates a default object ACL entry on the specified bucket.", // "httpMethod": "PUT", // "id": "storage.defaultObjectAccessControls.update", // "parameterOrder": [ // "bucket", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.delete": type ObjectAccessControlsDeleteCall struct { s *Service bucket string object string entity string opt_ map[string]interface{} } // Delete: Permanently deletes the ACL entry for the specified entity on // the specified object. func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object c.entity = entity return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { c.opt_["generation"] = generation return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectAccessControlsDeleteCall) Do() error { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", // "httpMethod": "DELETE", // "id": "storage.objectAccessControls.delete", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.get": type ObjectAccessControlsGetCall struct { s *Service bucket string object string entity string opt_ map[string]interface{} } // Get: Returns the ACL entry for the specified entity on the specified // object. func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object c.entity = entity return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { c.opt_["generation"] = generation return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
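// Editor's note: illustrative sketch (consumer perspective, not part of the
// generated file); same import and *storage.Service assumptions as the earlier
// sketch. It removes a per-object ACL entry, pinned to a specific object
// generation so a newer revision's ACL is left untouched.
func exampleRevokeObjectACL(svc *storage.Service) error {
	// Hypothetical bucket, object and generation values.
	return svc.ObjectAccessControls.
		Delete("example-bucket", "photos/cat.png", "user-someone@example.com").
		Generation(1234567890).
		Do()
}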
func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Returns the ACL entry for the specified entity on the specified object.", // "httpMethod": "GET", // "id": "storage.objectAccessControls.get", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.insert": type ObjectAccessControlsInsertCall struct { s *Service bucket string object string objectaccesscontrol *ObjectAccessControl opt_ map[string]interface{} } // Insert: Creates a new ACL entry on the specified object. func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object c.objectaccesscontrol = objectaccesscontrol return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { c.opt_["generation"] = generation return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new ACL entry on the specified object.", // "httpMethod": "POST", // "id": "storage.objectAccessControls.insert", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.list": type ObjectAccessControlsListCall struct { s *Service bucket string object string opt_ map[string]interface{} } // List: Retrieves ACL entries on the specified object. func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { c.opt_["generation"] = generation return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
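// Editor's note: illustrative sketch, same assumptions as the sketches above.
// It reads the ACL entry for one entity on an object and, if the read fails,
// inserts a new READER entry; treating every error as "not found" is purely
// for brevity, a real caller would inspect the error first.
func exampleEnsureReader(svc *storage.Service) (*storage.ObjectAccessControl, error) {
	entry, err := svc.ObjectAccessControls.
		Get("example-bucket", "photos/cat.png", "allUsers").
		Do()
	if err == nil {
		return entry, nil
	}
	return svc.ObjectAccessControls.
		Insert("example-bucket", "photos/cat.png", &storage.ObjectAccessControl{
			Entity: "allUsers",
			Role:   "READER",
		}).
		Do()
}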
func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControls if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves ACL entries on the specified object.", // "httpMethod": "GET", // "id": "storage.objectAccessControls.list", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl", // "response": { // "$ref": "ObjectAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.patch": type ObjectAccessControlsPatchCall struct { s *Service bucket string object string entity string objectaccesscontrol *ObjectAccessControl opt_ map[string]interface{} } // Patch: Updates an ACL entry on the specified object. This method // supports patch semantics. func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { c.opt_["generation"] = generation return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
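// Editor's note: illustrative sketch, same assumptions as above; the Items
// slice on ObjectAccessControls is assumed from its definition earlier in this
// package. It lists the ACL entries on an object and patches any public entry
// down to READER using patch semantics (only the Role field is sent).
func exampleAuditAndPatch(svc *storage.Service) error {
	acls, err := svc.ObjectAccessControls.List("example-bucket", "photos/cat.png").Do()
	if err != nil {
		return err
	}
	for _, entry := range acls.Items {
		if entry.Entity == "allUsers" {
			_, err = svc.ObjectAccessControls.
				Patch("example-bucket", "photos/cat.png", entry.Entity,
					&storage.ObjectAccessControl{Role: "READER"}).
				Do()
			if err != nil {
				return err
			}
		}
	}
	return nil
}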
func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.objectAccessControls.patch", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objectAccessControls.update": type ObjectAccessControlsUpdateCall struct { s *Service bucket string object string entity string objectaccesscontrol *ObjectAccessControl opt_ map[string]interface{} } // Update: Updates an ACL entry on the specified object. func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { c.opt_["generation"] = generation return c } // Fields allows partial responses to be retrieved. 
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *ObjectAccessControl if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates an ACL entry on the specified object.", // "httpMethod": "PUT", // "id": "storage.objectAccessControls.update", // "parameterOrder": [ // "bucket", // "object", // "entity" // ], // "parameters": { // "bucket": { // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, // "entity": { // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, // "response": { // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } // method id "storage.objects.compose": type ObjectsComposeCall struct { s *Service destinationBucket string destinationObject string composerequest *ComposeRequest opt_ map[string]interface{} } // Compose: Concatenates a list of existing objects into a new object in // the same bucket. func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, opt_: make(map[string]interface{})} c.destinationBucket = destinationBucket c.destinationObject = destinationObject c.composerequest = composerequest return c } // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. 
// "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsComposeCall) Do() (*Object, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["destinationPredefinedAcl"]; ok { params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Object if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Concatenates a list of existing objects into a new object in the same bucket.", // "httpMethod": "POST", // "id": "storage.objects.compose", // "parameterOrder": [ // "destinationBucket", // "destinationObject" // ], // "parameters": { // "destinationBucket": { // "description": "Name of the bucket in which to store the new object.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationObject": { // "description": "Name of the new object.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationPredefinedAcl": { // "description": "Apply a predefined set of access controls to the destination object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // } // }, // "path": "b/{destinationBucket}/o/{destinationObject}/compose", // "request": { // "$ref": "ComposeRequest" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true // } } // method id "storage.objects.copy": type ObjectsCopyCall struct { s *Service sourceBucket string sourceObject string destinationBucket string destinationObject string object *Object opt_ map[string]interface{} } // Copy: Copies a source object to a destination object. Optionally // overrides metadata. 
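// Editor's note: illustrative sketch, same assumptions as above; the
// ComposeRequest and ComposeRequestSourceObjects field names used here are
// assumed from their definitions earlier in this generated package. It
// concatenates two log shards into a single object in the same bucket, and
// only if the destination object does not exist yet (ifGenerationMatch=0).
func exampleCompose(svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "logs/part-0"},
			{Name: "logs/part-1"},
		},
	}
	return svc.Objects.
		Compose("example-bucket", "logs/combined", req).
		IfGenerationMatch(0).
		Do()
}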
func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, opt_: make(map[string]interface{})} c.sourceBucket = sourceBucket c.sourceObject = sourceObject c.destinationBucket = destinationBucket c.destinationObject = destinationObject c.object = object return c } // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the destination object's // current generation matches the given value. func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the destination object's current generation does not match the given // value. func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the destination object's current metageneration matches the given // value. func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the destination object's current metageneration does not // match the given value. func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // IfSourceGenerationMatch sets the optional parameter // "ifSourceGenerationMatch": Makes the operation conditional on whether // the source object's generation matches the given value. func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { c.opt_["ifSourceGenerationMatch"] = ifSourceGenerationMatch return c } // IfSourceGenerationNotMatch sets the optional parameter // "ifSourceGenerationNotMatch": Makes the operation conditional on // whether the source object's generation does not match the given // value. 
func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { c.opt_["ifSourceGenerationNotMatch"] = ifSourceGenerationNotMatch return c } // IfSourceMetagenerationMatch sets the optional parameter // "ifSourceMetagenerationMatch": Makes the operation conditional on // whether the source object's current metageneration matches the given // value. func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { c.opt_["ifSourceMetagenerationMatch"] = ifSourceMetagenerationMatch return c } // IfSourceMetagenerationNotMatch sets the optional parameter // "ifSourceMetagenerationNotMatch": Makes the operation conditional on // whether the source object's current metageneration does not match the // given value. func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { c.opt_["ifSourceMetagenerationNotMatch"] = ifSourceMetagenerationNotMatch return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl, unless the object resource // specifies the acl property, when it defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { c.opt_["projection"] = projection return c } // SourceGeneration sets the optional parameter "sourceGeneration": If // present, selects a specific revision of the source object (as opposed // to the latest version, the default). func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { c.opt_["sourceGeneration"] = sourceGeneration return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
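// Editor's note: illustrative sketch, same assumptions as above. It copies a
// specific revision of a source object into another bucket, requires that the
// destination does not already exist, and asks for the full projection so the
// returned metadata includes the ACL.
func exampleCopy(svc *storage.Service) (*storage.Object, error) {
	return svc.Objects.
		Copy("src-bucket", "originals/cat.png", "dst-bucket", "copies/cat.png",
			&storage.Object{}). // no metadata overrides
		SourceGeneration(1234567890). // hypothetical source revision
		IfGenerationMatch(0).         // destination must not exist yet
		Projection("full").
		Do()
}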
func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsCopyCall) Do() (*Object, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["destinationPredefinedAcl"]; ok { params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationNotMatch"]; ok { params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceGenerationMatch"]; ok { params.Set("ifSourceGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceGenerationNotMatch"]; ok { params.Set("ifSourceGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceMetagenerationMatch"]; ok { params.Set("ifSourceMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceMetagenerationNotMatch"]; ok { params.Set("ifSourceMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["sourceGeneration"]; ok { params.Set("sourceGeneration", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Object if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Copies a source object to a destination object. Optionally overrides metadata.", // "httpMethod": "POST", // "id": "storage.objects.copy", // "parameterOrder": [ // "sourceBucket", // "sourceObject", // "destinationBucket", // "destinationObject" // ], // "parameters": { // "destinationBucket": { // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationObject": { // "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationPredefinedAcl": { // "description": "Apply a predefined set of access controls to the destination object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationMatch": { // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." 
// ], // "location": "query", // "type": "string" // }, // "sourceBucket": { // "description": "Name of the bucket in which to find the source object.", // "location": "path", // "required": true, // "type": "string" // }, // "sourceGeneration": { // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "sourceObject": { // "description": "Name of the source object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true // } } // method id "storage.objects.delete": type ObjectsDeleteCall struct { s *Service bucket string object string opt_ map[string]interface{} } // Delete: Deletes an object and its metadata. Deletions are permanent // if versioning is not enabled for the bucket, or if the generation // parameter is used. func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object return c } // Generation sets the optional parameter "generation": If present, // permanently deletes a specific revision of this object (as opposed to // the latest version, the default). func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { c.opt_["generation"] = generation return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
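// Editor's note: illustrative sketch, same assumptions as above. It deletes an
// object only if its current generation still matches the value read earlier,
// guarding against removing a newer upload that raced with this caller.
func exampleGuardedDelete(svc *storage.Service, seenGeneration int64) error {
	return svc.Objects.
		Delete("example-bucket", "tmp/upload.bin").
		IfGenerationMatch(seenGeneration).
		Do()
}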
func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsDeleteCall) Do() error { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationNotMatch"]; ok { params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + params.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return err } return nil // { // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", // "httpMethod": "DELETE", // "id": "storage.objects.delete", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.get": type ObjectsGetCall struct { s *Service bucket string object string opt_ map[string]interface{} } // Get: Retrieves an object or its metadata. 
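// Editor's note: illustrative sketch, same assumptions as above. It fetches
// the metadata of a specific revision of an object, including the ACL
// (projection=full), and hands the returned Object back to the caller.
func exampleStat(svc *storage.Service) (*storage.Object, error) {
	return svc.Objects.
		Get("example-bucket", "photos/cat.png").
		Generation(1234567890). // hypothetical revision
		Projection("full").     // include the acl property in the response
		Do()
}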
func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { c.opt_["generation"] = generation return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's generation // matches the given value. func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's generation does not match the given value. func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsGetCall) Do() (*Object, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationNotMatch"]; ok { params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Object if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves an object or its metadata.", // "httpMethod": "GET", // "id": "storage.objects.get", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true // } } // method id "storage.objects.insert": type ObjectsInsertCall struct { s *Service bucket string object *Object opt_ map[string]interface{} media_ io.Reader resumable_ googleapi.SizeReaderAt mediaType_ string ctx_ context.Context protocol_ string } // Insert: Stores a new object and metadata. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { c := &ObjectsInsertCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object return c } // ContentEncoding sets the optional parameter "contentEncoding": If // set, sets the contentEncoding property of the final object to this // value. Setting this parameter is equivalent to setting the // contentEncoding metadata property. 
This can be useful when uploading // an object with uploadType=media to indicate the encoding of the // content being uploaded. func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { c.opt_["contentEncoding"] = contentEncoding return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // Name sets the optional parameter "name": Name of the object. Required // when the object metadata is not otherwise provided. Overrides the // object metadata's name value, if any. func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { c.opt_["name"] = name return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { c.opt_["predefinedAcl"] = predefinedAcl return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl, unless the object resource // specifies the acl property, when it defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { c.opt_["projection"] = projection return c } // Media specifies the media to upload in a single chunk. // At most one of Media and ResumableMedia may be set. func (c *ObjectsInsertCall) Media(r io.Reader) *ObjectsInsertCall { c.media_ = r c.protocol_ = "multipart" return c } // ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx. 
// At most one of Media and ResumableMedia may be set. // mediaType identifies the MIME media type of the upload, such as "image/png". // If mediaType is "", it will be auto-detected. func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { c.ctx_ = ctx c.resumable_ = io.NewSectionReader(r, 0, size) c.mediaType_ = mediaType c.protocol_ = "resumable" return c } // ProgressUpdater provides a callback function that will be called after every chunk. // It should be a low-latency function in order to not slow down the upload operation. // This should only be called when using ResumableMedia (as opposed to Media). func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { c.opt_["progressUpdater"] = pu return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsInsertCall) Do() (*Object, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["contentEncoding"]; ok { params.Set("contentEncoding", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationNotMatch"]; ok { params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["name"]; ok { params.Set("name", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedAcl"]; ok { params.Set("predefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") var progressUpdater_ googleapi.ProgressUpdater if v, ok := c.opt_["progressUpdater"]; ok { if pu, ok := v.(googleapi.ProgressUpdater); ok { progressUpdater_ = pu } } if c.media_ != nil || c.resumable_ != nil { urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) params.Set("uploadType", c.protocol_) } urls += "?" 
+ params.Encode() if c.protocol_ != "resumable" { var cancel func() cancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype) if cancel != nil { defer cancel() } } req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) if c.protocol_ == "resumable" { req.ContentLength = 0 if c.mediaType_ == "" { c.mediaType_ = googleapi.DetectMediaType(c.resumable_) } req.Header.Set("X-Upload-Content-Type", c.mediaType_) req.Body = nil } else { req.Header.Set("Content-Type", ctype) } req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } if c.protocol_ == "resumable" { loc := res.Header.Get("Location") rx := &googleapi.ResumableUpload{ Client: c.s.client, UserAgent: c.s.userAgent(), URI: loc, Media: c.resumable_, MediaType: c.mediaType_, ContentLength: c.resumable_.Size(), Callback: progressUpdater_, } res, err = rx.Upload(c.ctx_) if err != nil { return nil, err } defer res.Body.Close() } var ret *Object if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Stores a new object and metadata.", // "httpMethod": "POST", // "id": "storage.objects.insert", // "mediaUpload": { // "accept": [ // "*/*" // ], // "protocols": { // "resumable": { // "multipart": true, // "path": "/resumable/upload/storage/v1/b/{bucket}/o" // }, // "simple": { // "multipart": true, // "path": "/upload/storage/v1/b/{bucket}/o" // } // } // }, // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "contentEncoding": { // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "name": { // "description": "Name of the object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", // "location": "query", // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true, // "supportsMediaUpload": true // } } // method id "storage.objects.list": type ObjectsListCall struct { s *Service bucket string opt_ map[string]interface{} } // List: Retrieves a list of objects matching the criteria. func (r *ObjectsService) List(bucket string) *ObjectsListCall { c := &ObjectsListCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket return c } // Delimiter sets the optional parameter "delimiter": Returns results in // a directory-like mode. items will contain only objects whose names, // aside from the prefix, do not contain delimiter. Objects whose names, // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. Duplicate // prefixes are omitted. func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { c.opt_["delimiter"] = delimiter return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of items plus prefixes to return. As duplicate prefixes are omitted, // fewer total results may be returned than requested. The default value // of this parameter is 1,000 items. func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { c.opt_["pageToken"] = pageToken return c } // Prefix sets the optional parameter "prefix": Filter results to // objects whose names begin with this prefix. func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { c.opt_["prefix"] = prefix return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. 
func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { c.opt_["projection"] = projection return c } // Versions sets the optional parameter "versions": If true, lists all // versions of an object as distinct results. The default is false. For // more information, see Object Versioning. func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { c.opt_["versions"] = versions return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsListCall) Do() (*Objects, error) { var body io.Reader = nil params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["delimiter"]; ok { params.Set("delimiter", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["prefix"]; ok { params.Set("prefix", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["versions"]; ok { params.Set("versions", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" + params.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Objects if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves a list of objects matching the criteria.", // "httpMethod": "GET", // "id": "storage.objects.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which to look for objects.", // "location": "path", // "required": true, // "type": "string" // }, // "delimiter": { // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", // "location": "query", // "type": "string" // }, // "maxResults": { // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "pageToken": { // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, // "prefix": { // "description": "Filter results to objects whose names begin with this prefix.", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. 
Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." // ], // "location": "query", // "type": "string" // }, // "versions": { // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", // "location": "query", // "type": "boolean" // } // }, // "path": "b/{bucket}/o", // "response": { // "$ref": "Objects" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsSubscription": true // } } // method id "storage.objects.patch": type ObjectsPatchCall struct { s *Service bucket string object string object2 *Object opt_ map[string]interface{} } // Patch: Updates an object's metadata. This method supports patch // semantics. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object c.object2 = object2 return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { c.opt_["generation"] = generation return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. 
// "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { c.opt_["predefinedAcl"] = predefinedAcl return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsPatchCall) Do() (*Object, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationNotMatch"]; ok { params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedAcl"]; ok { params.Set("predefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + params.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Object if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates an object's metadata. 
This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.objects.patch", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." // ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.rewrite": type ObjectsRewriteCall struct { s *Service sourceBucket string sourceObject string destinationBucket string destinationObject string object *Object opt_ map[string]interface{} } // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. 
func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, opt_: make(map[string]interface{})} c.sourceBucket = sourceBucket c.sourceObject = sourceObject c.destinationBucket = destinationBucket c.destinationObject = destinationObject c.object = object return c } // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the destination object's // current generation matches the given value. func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the destination object's current generation does not match the given // value. func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the destination object's current metageneration matches the given // value. func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the destination object's current metageneration does not // match the given value. func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // IfSourceGenerationMatch sets the optional parameter // "ifSourceGenerationMatch": Makes the operation conditional on whether // the source object's generation matches the given value. func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { c.opt_["ifSourceGenerationMatch"] = ifSourceGenerationMatch return c } // IfSourceGenerationNotMatch sets the optional parameter // "ifSourceGenerationNotMatch": Makes the operation conditional on // whether the source object's generation does not match the given // value. 
func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { c.opt_["ifSourceGenerationNotMatch"] = ifSourceGenerationNotMatch return c } // IfSourceMetagenerationMatch sets the optional parameter // "ifSourceMetagenerationMatch": Makes the operation conditional on // whether the source object's current metageneration matches the given // value. func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { c.opt_["ifSourceMetagenerationMatch"] = ifSourceMetagenerationMatch return c } // IfSourceMetagenerationNotMatch sets the optional parameter // "ifSourceMetagenerationNotMatch": Makes the operation conditional on // whether the source object's current metageneration does not match the // given value. func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { c.opt_["ifSourceMetagenerationNotMatch"] = ifSourceMetagenerationNotMatch return c } // MaxBytesRewrittenPerCall sets the optional parameter // "maxBytesRewrittenPerCall": The maximum number of bytes that will be // rewritten per rewrite request. Most callers shouldn't need to specify // this parameter - it is primarily in place to support testing. If // specified the value must be an integral multiple of 1 MiB (1048576). // Also, this only applies to requests where the source and destination // span locations and/or storage classes. Finally, this value must not // change across rewrite calls else you'll get an error that the // rewriteToken is invalid. func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { c.opt_["maxBytesRewrittenPerCall"] = maxBytesRewrittenPerCall return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl, unless the object resource // specifies the acl property, when it defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { c.opt_["projection"] = projection return c } // RewriteToken sets the optional parameter "rewriteToken": Include this // field (from the previous rewrite response) on each rewrite request // after the first one, until the rewrite response 'done' flag is true. // Calls that provide a rewriteToken can omit all other request fields, // but if included those fields must match the values provided in the // first rewrite request. func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { c.opt_["rewriteToken"] = rewriteToken return c } // SourceGeneration sets the optional parameter "sourceGeneration": If // present, selects a specific revision of the source object (as opposed // to the latest version, the default). func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { c.opt_["sourceGeneration"] = sourceGeneration return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsRewriteCall) Do() (*RewriteResponse, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["destinationPredefinedAcl"]; ok { params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationNotMatch"]; ok { params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceGenerationMatch"]; ok { params.Set("ifSourceGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceGenerationNotMatch"]; ok { params.Set("ifSourceGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceMetagenerationMatch"]; ok { params.Set("ifSourceMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifSourceMetagenerationNotMatch"]; ok { params.Set("ifSourceMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["maxBytesRewrittenPerCall"]; ok { params.Set("maxBytesRewrittenPerCall", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["rewriteToken"]; ok { params.Set("rewriteToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["sourceGeneration"]; ok { params.Set("sourceGeneration", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *RewriteResponse if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", // "httpMethod": "POST", // "id": "storage.objects.rewrite", // "parameterOrder": [ // "sourceBucket", // "sourceObject", // "destinationBucket", // "destinationObject" // ], // "parameters": { // "destinationBucket": { // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationObject": { // "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", // "location": "path", // "required": true, // "type": "string" // }, // "destinationPredefinedAcl": { // "description": "Apply a predefined set of access controls to the destination object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationMatch": { // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceGenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifSourceMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "maxBytesRewrittenPerCall": { // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", // "format": "int64", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." // ], // "location": "query", // "type": "string" // }, // "rewriteToken": { // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", // "location": "query", // "type": "string" // }, // "sourceBucket": { // "description": "Name of the bucket in which to find the source object.", // "location": "path", // "required": true, // "type": "string" // }, // "sourceGeneration": { // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "sourceObject": { // "description": "Name of the source object.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "RewriteResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } // method id "storage.objects.update": type ObjectsUpdateCall struct { s *Service bucket string object string object2 *Object opt_ map[string]interface{} } // Update: Updates an object's metadata. func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.object = object c.object2 = object2 return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { c.opt_["generation"] = generation return c } // IfGenerationMatch sets the optional parameter "ifGenerationMatch": // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { c.opt_["ifGenerationMatch"] = ifGenerationMatch return c } // IfGenerationNotMatch sets the optional parameter // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch return c } // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. 
func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch return c } // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the operation conditional on // whether the object's current metageneration does not match the given // value. func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch return c } // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. // // Possible values: // "authenticatedRead" - Object owner gets OWNER access, and // allAuthenticatedUsers get READER access. // "bucketOwnerFullControl" - Object owner gets OWNER access, and // project team owners get OWNER access. // "bucketOwnerRead" - Object owner gets OWNER access, and project // team owners get READER access. // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // members get access according to their roles. // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { c.opt_["predefinedAcl"] = predefinedAcl return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to full. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { c.opt_["projection"] = projection return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsUpdateCall) Do() (*Object, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["generation"]; ok { params.Set("generation", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationMatch"]; ok { params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifGenerationNotMatch"]; ok { params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationMatch"]; ok { params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["predefinedAcl"]; ok { params.Set("predefinedAcl", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" 
+ params.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Object if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Updates an object's metadata.", // "httpMethod": "PUT", // "id": "storage.objects.update", // "parameterOrder": [ // "bucket", // "object" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, // "generation": { // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationMatch": { // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "object": { // "description": "Name of the object.", // "location": "path", // "required": true, // "type": "string" // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ // "authenticatedRead", // "bucketOwnerFullControl", // "bucketOwnerRead", // "private", // "projectPrivate", // "publicRead" // ], // "enumDescriptions": [ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", // "Object owner gets OWNER access, and project team owners get OWNER access.", // "Object owner gets OWNER access, and project team owners get READER access.", // "Object owner gets OWNER access.", // "Object owner gets OWNER access, and project team members get access according to their roles.", // "Object owner gets OWNER access, and allUsers get READER access." // ], // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to full.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." 
// ], // "location": "query", // "type": "string" // } // }, // "path": "b/{bucket}/o/{object}", // "request": { // "$ref": "Object" // }, // "response": { // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true // } } // method id "storage.objects.watchAll": type ObjectsWatchAllCall struct { s *Service bucket string channel *Channel opt_ map[string]interface{} } // WatchAll: Watch for changes on all objects in a bucket. func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { c := &ObjectsWatchAllCall{s: r.s, opt_: make(map[string]interface{})} c.bucket = bucket c.channel = channel return c } // Delimiter sets the optional parameter "delimiter": Returns results in // a directory-like mode. items will contain only objects whose names, // aside from the prefix, do not contain delimiter. Objects whose names, // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. Duplicate // prefixes are omitted. func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { c.opt_["delimiter"] = delimiter return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of items plus prefixes to return. As duplicate prefixes are omitted, // fewer total results may be returned than requested. The default value // of this parameter is 1,000 items. func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { c.opt_["maxResults"] = maxResults return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { c.opt_["pageToken"] = pageToken return c } // Prefix sets the optional parameter "prefix": Filter results to // objects whose names begin with this prefix. func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { c.opt_["prefix"] = prefix return c } // Projection sets the optional parameter "projection": Set of // properties to return. Defaults to noAcl. // // Possible values: // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { c.opt_["projection"] = projection return c } // Versions sets the optional parameter "versions": If true, lists all // versions of an object as distinct results. The default is false. For // more information, see Object Versioning. func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { c.opt_["versions"] = versions return c } // Fields allows partial responses to be retrieved. // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { c.opt_["fields"] = googleapi.CombineFields(s) return c } func (c *ObjectsWatchAllCall) Do() (*Channel, error) { var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } ctype := "application/json" params := make(url.Values) params.Set("alt", "json") if v, ok := c.opt_["delimiter"]; ok { params.Set("delimiter", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["maxResults"]; ok { params.Set("maxResults", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["pageToken"]; ok { params.Set("pageToken", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["prefix"]; ok { params.Set("prefix", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["projection"]; ok { params.Set("projection", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["versions"]; ok { params.Set("versions", fmt.Sprintf("%v", v)) } if v, ok := c.opt_["fields"]; ok { params.Set("fields", fmt.Sprintf("%v", v)) } urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") urls += "?" + params.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) res, err := c.s.client.Do(req) if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } var ret *Channel if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { return nil, err } return ret, nil // { // "description": "Watch for changes on all objects in a bucket.", // "httpMethod": "POST", // "id": "storage.objects.watchAll", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { // "description": "Name of the bucket in which to look for objects.", // "location": "path", // "required": true, // "type": "string" // }, // "delimiter": { // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", // "location": "query", // "type": "string" // }, // "maxResults": { // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "pageToken": { // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, // "prefix": { // "description": "Filter results to objects whose names begin with this prefix.", // "location": "query", // "type": "string" // }, // "projection": { // "description": "Set of properties to return. Defaults to noAcl.", // "enum": [ // "full", // "noAcl" // ], // "enumDescriptions": [ // "Include all properties.", // "Omit the acl property." // ], // "location": "query", // "type": "string" // }, // "versions": { // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", // "location": "query", // "type": "boolean" // } // }, // "path": "b/{bucket}/o/watch", // "request": { // "$ref": "Channel", // "parameterName": "resource" // }, // "response": { // "$ref": "Channel" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsSubscription": true // } } ================================================ FILE: vendor/google.golang.org/appengine/.travis.yml ================================================ language: go sudo: false go: - 1.4 install: - go get -v -t -d google.golang.org/appengine/... - mkdir sdk - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip" - unzip sdk.zip -d sdk - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py script: - go version - go test -v google.golang.org/appengine/... - go test -v -race google.golang.org/appengine/... - sdk/go_appengine/goapp test -v google.golang.org/appengine/... ================================================ FILE: vendor/google.golang.org/appengine/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: vendor/google.golang.org/appengine/README.md ================================================ # Go App Engine packages [![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) This repository supports the Go runtime on App Engine, including both classic App Engine and Managed VMs. It provides APIs for interacting with App Engine services. Its canonical import path is `google.golang.org/appengine`. See https://cloud.google.com/appengine/docs/go/ for more information. File issue reports and feature requests on the [Google App Engine issue tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect). ## Directory structure The top level directory of this repository is the `appengine` package. It contains the basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API packages are in subdirectories (e.g. `datastore`). There is an `internal` subdirectory that contains service protocol buffers, plus packages required for connectivity to make API calls. 
App Engine apps should not directly import any package under `internal`. ## Updating a Go App Engine app This section describes how to update a traditional Go App Engine app to use these packages. ### 1. Update YAML files (Managed VMs only) The `app.yaml` file (and YAML files for modules) should have these new lines added: ``` vm: true manual_scaling: instances: 1 ``` See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details. ### 2. Update import paths The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. You will need to update your code to use import paths starting with that; for instance, code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. You can do that manually, or by running this command to recursively update all Go source files in the current directory: (may require GNU sed) ``` sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \ $(find . -name '*.go') ``` ### 3. Update code using deprecated, removed or modified APIs Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and some are not available yet. This list summarises the differences: * `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. * `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. * `delay.Call` now returns an error. * `search.FieldLoadSaver` now handles document metadata. * `urlfetch.Transport` no longer has a Deadline field; set a deadline on the `context.Context` instead. * `aetest` no longer declares its own Context type, and uses the standard one instead. * `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been deprecated and unused for a long time. * `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. * Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead. * `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead. ================================================ FILE: vendor/google.golang.org/appengine/aetest/doc.go ================================================ /* Package aetest provides an API for running dev_appserver for use in tests. An example test file: package foo_test import ( "testing" "google.golang.org/appengine/memcache" "google.golang.org/appengine/aetest" ) func TestFoo(t *testing.T) { ctx, done, err := aetest.NewContext() if err != nil { t.Fatal(err) } defer done() it := &memcache.Item{ Key: "some-key", Value: []byte("some-value"), } err = memcache.Set(ctx, it) if err != nil { t.Fatalf("Set err: %v", err) } it, err = memcache.Get(ctx, "some-key") if err != nil { t.Fatalf("Get err: %v; want no error", err) } if g, w := string(it.Value), "some-value" ; g != w { t.Errorf("retrieved Item.Value = %q, want %q", g, w) } } The environment variable APPENGINE_DEV_APPSERVER specifies the location of the dev_appserver.py executable to use. If unset, the system PATH is consulted. 
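A test that needs more control over the instance, such as a strongly
consistent datastore, can construct one directly. A minimal sketch using
the Options, NewInstance and NewRequest APIs defined in this package
(error handling as in the example above):

	inst, err := aetest.NewInstance(&aetest.Options{
		StronglyConsistentDatastore: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer inst.Close()

	req, err := inst.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatal(err)
	}
	ctx := appengine.NewContext(req)
	_ = ctx // use ctx with any App Engine API as usual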
*/ package aetest ================================================ FILE: vendor/google.golang.org/appengine/aetest/instance.go ================================================ package aetest import ( "io" "net/http" "golang.org/x/net/context" "google.golang.org/appengine" ) // Instance represents a running instance of the development API Server. type Instance interface { // Close kills the child api_server.py process, releasing its resources. io.Closer // NewRequest returns an *http.Request associated with this instance. NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) } // Options is used to specify options when creating an Instance. type Options struct { // AppID specifies the App ID to use during tests. // By default, "testapp". AppID string // StronglyConsistentDatastore is whether the local datastore should be // strongly consistent. This will diverge from production behaviour. StronglyConsistentDatastore bool } // NewContext starts an instance of the development API server, and returns // a context that will route all API calls to that server, as well as a // closure that must be called when the Context is no longer required. func NewContext() (context.Context, func(), error) { inst, err := NewInstance(nil) if err != nil { return nil, nil, err } req, err := inst.NewRequest("GET", "/", nil) if err != nil { inst.Close() return nil, nil, err } ctx := appengine.NewContext(req) return ctx, func() { inst.Close() }, nil } // PrepareDevAppserver is a hook which, if set, will be called before the // dev_appserver.py is started, each time it is started. If aetest.NewContext // is invoked from the goapp test tool, this hook is unnecessary. var PrepareDevAppserver func() error ================================================ FILE: vendor/google.golang.org/appengine/aetest/instance_classic.go ================================================ // +build appengine package aetest import "appengine/aetest" // NewInstance launches a running instance of api_server.py which can be used // for multiple test Contexts that delegate all App Engine API calls to that // instance. // If opts is nil the default values are used. func NewInstance(opts *Options) (Instance, error) { aetest.PrepareDevAppserver = PrepareDevAppserver var aeOpts *aetest.Options if opts != nil { aeOpts = &aetest.Options{ AppID: opts.AppID, StronglyConsistentDatastore: opts.StronglyConsistentDatastore, } } return aetest.NewInstance(aeOpts) } ================================================ FILE: vendor/google.golang.org/appengine/aetest/instance_vm.go ================================================ // +build !appengine package aetest import ( "bufio" "crypto/rand" "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "os/exec" "path/filepath" "regexp" "time" "golang.org/x/net/context" "google.golang.org/appengine/internal" ) // NewInstance launches a running instance of api_server.py which can be used // for multiple test Contexts that delegate all App Engine API calls to that // instance. // If opts is nil the default values are used. func NewInstance(opts *Options) (Instance, error) { i := &instance{ opts: opts, appID: "testapp", } if opts != nil && opts.AppID != "" { i.appID = opts.AppID } if err := i.startChild(); err != nil { return nil, err } return i, nil } func newSessionID() string { var buf [16]byte io.ReadFull(rand.Reader, buf[:]) return fmt.Sprintf("%x", buf[:]) } // instance implements the Instance interface. 
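// It wraps the dev_appserver.py child process started by startChild and
// records the API and admin server URLs parsed from the child's stderr.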
type instance struct { opts *Options child *exec.Cmd apiURL *url.URL // base URL of API HTTP server adminURL string // base URL of admin HTTP server appDir string appID string relFuncs []func() // funcs to release any associated contexts } // NewRequest returns an *http.Request associated with this instance. func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) { req, err := http.NewRequest(method, urlStr, body) if err != nil { return nil, err } // Associate this request. release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context { ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID) return ctx }) i.relFuncs = append(i.relFuncs, release) return req, nil } // Close kills the child api_server.py process, releasing its resources. func (i *instance) Close() (err error) { for _, rel := range i.relFuncs { rel() } i.relFuncs = nil if i.child == nil { return nil } defer func() { i.child = nil err1 := os.RemoveAll(i.appDir) if err == nil { err = err1 } }() if p := i.child.Process; p != nil { errc := make(chan error, 1) go func() { errc <- i.child.Wait() }() // Call the quit handler on the admin server. res, err := http.Get(i.adminURL + "/quit") if err != nil { p.Kill() return fmt.Errorf("unable to call /quit handler: %v", err) } res.Body.Close() select { case <-time.After(15 * time.Second): p.Kill() return errors.New("timeout killing child process") case err = <-errc: // Do nothing. } } return } func fileExists(path string) bool { _, err := os.Stat(path) return err == nil } func findPython() (path string, err error) { for _, name := range []string{"python2.7", "python"} { path, err = exec.LookPath(name) if err == nil { return } } return } func findDevAppserver() (string, error) { if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" { if fileExists(p) { return p, nil } return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p) } return exec.LookPath("dev_appserver.py") } var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`) var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`) func (i *instance) startChild() (err error) { if PrepareDevAppserver != nil { if err := PrepareDevAppserver(); err != nil { return err } } python, err := findPython() if err != nil { return fmt.Errorf("Could not find python interpreter: %v", err) } devAppserver, err := findDevAppserver() if err != nil { return fmt.Errorf("Could not find dev_appserver.py: %v", err) } i.appDir, err = ioutil.TempDir("", "appengine-aetest") if err != nil { return err } defer func() { if err != nil { os.RemoveAll(i.appDir) } }() err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644) if err != nil { return err } appserverArgs := []string{ devAppserver, "--port=0", "--api_port=0", "--admin_port=0", "--automatic_restart=false", "--skip_sdk_update_check=true", "--clear_datastore=true", "--clear_search_indexes=true", "--datastore_path", filepath.Join(i.appDir, "datastore"), } if i.opts != nil && i.opts.StronglyConsistentDatastore { appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent") } appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app")) i.child = exec.Command(python, appserverArgs..., ) i.child.Stdout = 
os.Stdout var stderr io.Reader stderr, err = i.child.StderrPipe() if err != nil { return err } stderr = io.TeeReader(stderr, os.Stderr) if err = i.child.Start(); err != nil { return err } // Read stderr until we have read the URLs of the API server and admin interface. errc := make(chan error, 1) go func() { s := bufio.NewScanner(stderr) for s.Scan() { if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil { u, err := url.Parse(match[1]) if err != nil { errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err) return } i.apiURL = u } if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil { i.adminURL = match[1] } if i.adminURL != "" && i.apiURL != nil { break } } errc <- s.Err() }() select { case <-time.After(15 * time.Second): if p := i.child.Process; p != nil { p.Kill() } return errors.New("timeout starting child process") case err := <-errc: if err != nil { return fmt.Errorf("error reading child process stderr: %v", err) } } if i.adminURL == "" { return errors.New("unable to find admin server URL") } if i.apiURL == nil { return errors.New("unable to find API server URL") } return nil } func (i *instance) appYAML() string { return fmt.Sprintf(appYAMLTemplate, i.appID) } const appYAMLTemplate = ` application: %s version: 1 runtime: go api_version: go1 vm: true handlers: - url: /.* script: _go_app ` const appSource = ` package main import "google.golang.org/appengine" func main() { appengine.Main() } ` ================================================ FILE: vendor/google.golang.org/appengine/aetest/user.go ================================================ package aetest import ( "hash/crc32" "net/http" "strconv" "google.golang.org/appengine/user" ) // Login causes the provided Request to act as though issued by the given user. func Login(u *user.User, req *http.Request) { req.Header.Set("X-AppEngine-User-Email", u.Email) id := u.ID if id == "" { id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable))) } req.Header.Set("X-AppEngine-User-Id", id) req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email) req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider) if u.Admin { req.Header.Set("X-AppEngine-User-Is-Admin", "1") } else { req.Header.Set("X-AppEngine-User-Is-Admin", "0") } } // Logout causes the provided Request to act as though issued by a logged-out // user. func Logout(req *http.Request) { req.Header.Del("X-AppEngine-User-Email") req.Header.Del("X-AppEngine-User-Id") req.Header.Del("X-AppEngine-User-Is-Admin") req.Header.Del("X-AppEngine-User-Federated-Identity") req.Header.Del("X-AppEngine-User-Federated-Provider") } ================================================ FILE: vendor/google.golang.org/appengine/appengine.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package appengine provides basic functionality for Google App Engine. // // For more information on how to write Go apps for Google App Engine, see: // https://cloud.google.com/appengine/docs/go/ package appengine import ( "net/http" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" ) // IsDevAppServer reports whether the App Engine app is running in the // development App Server. func IsDevAppServer() bool { return internal.IsDevAppServer() } // NewContext returns a context for an in-flight HTTP request. // This function is cheap. 
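// A handler typically calls it once near the top, e.g. ctx := appengine.NewContext(r),
// and passes ctx to the service packages.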
func NewContext(req *http.Request) context.Context { return WithContext(context.Background(), req) } // WithContext returns a copy of the parent context // and associates it with an in-flight HTTP request. // This function is cheap. func WithContext(parent context.Context, req *http.Request) context.Context { return internal.WithContext(parent, req) } // TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. // BlobKey is a key for a blobstore blob. // // Conceptually, this type belongs in the blobstore package, but it lives in // the appengine package to avoid a circular dependency: blobstore depends on // datastore, and datastore needs to refer to the BlobKey type. type BlobKey string // GeoPoint represents a location as latitude/longitude in degrees. type GeoPoint struct { Lat, Lng float64 } // Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. func (g GeoPoint) Valid() bool { return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 } // APICallFunc defines a function type for handling an API call. // See WithCallOverride. type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error // WithCallOverride returns a copy of the parent context // that will cause API calls to invoke f instead of their normal operation. // // This is intended for advanced users only. func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) } // APICall performs an API call. // // This is not intended for general use; it is exported for use in conjunction // with WithAPICallFunc. func APICall(ctx context.Context, service, method string, in, out proto.Message) error { return internal.Call(ctx, service, method, in, out) } ================================================ FILE: vendor/google.golang.org/appengine/appengine_vm.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build !appengine package appengine import ( "golang.org/x/net/context" "google.golang.org/appengine/internal" ) // The comment below must not be changed. // It is used by go-app-builder to recognise that this package has // the Main function to use in the synthetic main. // The gophers party all night; the rabbits provide the beats. // Main is the principal entry point for a Managed VMs app. // It installs a trivial health checker if one isn't already registered, // and starts listening on port 8080 (overridden by the $PORT environment // variable). // // See https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#health_check_requests // for details on how to do your own health checking. // // Main never returns. // // Main is designed so that the app's main package looks like this: // // package main // // import ( // "google.golang.org/appengine" // // _ "myapp/package0" // _ "myapp/package1" // ) // // func main() { // appengine.Main() // } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. func Main() { internal.Main() } // BackgroundContext returns a context not associated with a request. // This should only be used when not servicing a request. // This only works on Managed VMs. 
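// Typical callers are background goroutines started from Main that are not
// handling any incoming request.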
func BackgroundContext() context.Context { return internal.BackgroundContext() } ================================================ FILE: vendor/google.golang.org/appengine/blobstore/blobstore.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package blobstore provides a client for App Engine's persistent blob // storage service. package blobstore import ( "bufio" "encoding/base64" "fmt" "io" "io/ioutil" "mime" "mime/multipart" "net/http" "net/textproto" "net/url" "strconv" "strings" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/datastore" "google.golang.org/appengine/internal" basepb "google.golang.org/appengine/internal/base" blobpb "google.golang.org/appengine/internal/blobstore" ) const ( blobInfoKind = "__BlobInfo__" blobFileIndexKind = "__BlobFileIndex__" zeroKey = appengine.BlobKey("") ) // BlobInfo is the blob metadata that is stored in the datastore. // Filename may be empty. type BlobInfo struct { BlobKey appengine.BlobKey ContentType string `datastore:"content_type"` CreationTime time.Time `datastore:"creation"` Filename string `datastore:"filename"` Size int64 `datastore:"size"` MD5 string `datastore:"md5_hash"` // ObjectName is the Google Cloud Storage name for this blob. ObjectName string `datastore:"gs_object_name"` } // isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch. // // The blobstore stores blob metadata in the datastore. When loading that // metadata, it may contain fields that we don't care about. datastore.Get will // return datastore.ErrFieldMismatch in that case, so we ignore that specific // error. func isErrFieldMismatch(err error) bool { _, ok := err.(*datastore.ErrFieldMismatch) return ok } // Stat returns the BlobInfo for a provided blobKey. If no blob was found for // that key, Stat returns datastore.ErrNoSuchEntity. func Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) { c, _ = appengine.Namespace(c, "") // Blobstore is always in the empty string namespace dskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil) bi := &BlobInfo{ BlobKey: blobKey, } if err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) { return nil, err } return bi, nil } // Send sets the headers on response to instruct App Engine to send a blob as // the response body. This is more efficient than reading and writing it out // manually and isn't subject to normal response size limits. func Send(response http.ResponseWriter, blobKey appengine.BlobKey) { hdr := response.Header() hdr.Set("X-AppEngine-BlobKey", string(blobKey)) if hdr.Get("Content-Type") == "" { // This value is known to dev_appserver to mean automatic. // In production this is remapped to the empty value which // means automatic. hdr.Set("Content-Type", "application/vnd.google.appengine.auto") } } // UploadURL creates an upload URL for the form that the user will // fill out, passing the application path to load when the POST of the // form is completed. These URLs expire and should not be reused. The // opts parameter may be nil. 
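// A common flow (sketch only): call UploadURL when rendering the upload form,
// use the returned URL as the form's action, and read the uploaded blobs in
// the successPath handler with ParseUpload.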
func UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) { req := &blobpb.CreateUploadURLRequest{ SuccessPath: proto.String(successPath), } if opts != nil { if n := opts.MaxUploadBytes; n != 0 { req.MaxUploadSizeBytes = &n } if n := opts.MaxUploadBytesPerBlob; n != 0 { req.MaxUploadSizePerBlobBytes = &n } if s := opts.StorageBucket; s != "" { req.GsBucketName = &s } } res := &blobpb.CreateUploadURLResponse{} if err := internal.Call(c, "blobstore", "CreateUploadURL", req, res); err != nil { return nil, err } return url.Parse(*res.Url) } // UploadURLOptions are the options to create an upload URL. type UploadURLOptions struct { MaxUploadBytes int64 // optional MaxUploadBytesPerBlob int64 // optional // StorageBucket specifies the Google Cloud Storage bucket in which // to store the blob. // This is required if you use Cloud Storage instead of Blobstore. // Your application must have permission to write to the bucket. // You may optionally specify a bucket name and path in the format // "bucket_name/path", in which case the included path will be the // prefix of the uploaded object's name. StorageBucket string } // Delete deletes a blob. func Delete(c context.Context, blobKey appengine.BlobKey) error { return DeleteMulti(c, []appengine.BlobKey{blobKey}) } // DeleteMulti deletes multiple blobs. func DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error { s := make([]string, len(blobKey)) for i, b := range blobKey { s[i] = string(b) } req := &blobpb.DeleteBlobRequest{ BlobKey: s, } res := &basepb.VoidProto{} if err := internal.Call(c, "blobstore", "DeleteBlob", req, res); err != nil { return err } return nil } func errorf(format string, args ...interface{}) error { return fmt.Errorf("blobstore: "+format, args...) } // ParseUpload parses the synthetic POST request that your app gets from // App Engine after a user's successful upload of blobs. Given the request, // ParseUpload returns a map of the blobs received (keyed by HTML form // element name) and other non-blob POST parameters. func ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) { _, params, err := mime.ParseMediaType(req.Header.Get("Content-Type")) if err != nil { return nil, nil, err } boundary := params["boundary"] if boundary == "" { return nil, nil, errorf("did not find MIME multipart boundary") } blobs = make(map[string][]*BlobInfo) other = make(url.Values) mreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader("\r\n\r\n")), boundary) for { part, perr := mreader.NextPart() if perr == io.EOF { break } if perr != nil { return nil, nil, errorf("error reading next mime part with boundary %q (len=%d): %v", boundary, len(boundary), perr) } bi := &BlobInfo{} ctype, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition")) if err != nil { return nil, nil, err } bi.Filename = params["filename"] formKey := params["name"] ctype, params, err = mime.ParseMediaType(part.Header.Get("Content-Type")) if err != nil { return nil, nil, err } bi.BlobKey = appengine.BlobKey(params["blob-key"]) if ctype != "message/external-body" || bi.BlobKey == "" { if formKey != "" { slurp, serr := ioutil.ReadAll(part) if serr != nil { return nil, nil, errorf("error reading %q MIME part", formKey) } other[formKey] = append(other[formKey], string(slurp)) } continue } // App Engine sends a MIME header as the body of each MIME part. 
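// Read that header to recover the blob metadata that follows: size, content
// type, creation time, and the optional MD5 and Cloud Storage object name.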
tp := textproto.NewReader(bufio.NewReader(part)) header, mimeerr := tp.ReadMIMEHeader() if mimeerr != nil { return nil, nil, mimeerr } bi.Size, err = strconv.ParseInt(header.Get("Content-Length"), 10, 64) if err != nil { return nil, nil, err } bi.ContentType = header.Get("Content-Type") // Parse the time from the MIME header like: // X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136 createDate := header.Get("X-AppEngine-Upload-Creation") if createDate == "" { return nil, nil, errorf("expected to find an X-AppEngine-Upload-Creation header") } bi.CreationTime, err = time.Parse("2006-01-02 15:04:05.000000", createDate) if err != nil { return nil, nil, errorf("error parsing X-AppEngine-Upload-Creation: %s", err) } if hdr := header.Get("Content-MD5"); hdr != "" { md5, err := base64.URLEncoding.DecodeString(hdr) if err != nil { return nil, nil, errorf("bad Content-MD5 %q: %v", hdr, err) } bi.MD5 = string(md5) } // If the GCS object name was provided, record it. bi.ObjectName = header.Get("X-AppEngine-Cloud-Storage-Object") blobs[formKey] = append(blobs[formKey], bi) } return } // Reader is a blob reader. type Reader interface { io.Reader io.ReaderAt io.Seeker } // NewReader returns a reader for a blob. It always succeeds; if the blob does // not exist then an error will be reported upon first read. func NewReader(c context.Context, blobKey appengine.BlobKey) Reader { return openBlob(c, blobKey) } // BlobKeyForFile returns a BlobKey for a Google Storage file. // The filename should be of the form "/gs/bucket_name/object_name". func BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) { req := &blobpb.CreateEncodedGoogleStorageKeyRequest{ Filename: &filename, } res := &blobpb.CreateEncodedGoogleStorageKeyResponse{} if err := internal.Call(c, "blobstore", "CreateEncodedGoogleStorageKey", req, res); err != nil { return "", err } return appengine.BlobKey(*res.BlobKey), nil } ================================================ FILE: vendor/google.golang.org/appengine/blobstore/read.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package blobstore import ( "errors" "fmt" "io" "os" "sync" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" blobpb "google.golang.org/appengine/internal/blobstore" ) // openBlob returns a reader for a blob. It always succeeds; if the blob does // not exist then an error will be reported upon first read. func openBlob(c context.Context, blobKey appengine.BlobKey) Reader { return &reader{ c: c, blobKey: blobKey, } } const readBufferSize = 256 * 1024 // reader is a blob reader. It implements the Reader interface. type reader struct { c context.Context // Either blobKey or filename is set: blobKey appengine.BlobKey filename string closeFunc func() // is nil if unavailable or already closed. // buf is the read buffer. r is how much of buf has been read. // off is the offset of buf[0] relative to the start of the blob. // An invariant is 0 <= r && r <= len(buf). // Reads that don't require an RPC call will increment r but not off. // Seeks may modify r without discarding the buffer, but only if the // invariant can be maintained. 
mu sync.Mutex buf []byte r int off int64 } func (r *reader) Close() error { if f := r.closeFunc; f != nil { f() } r.closeFunc = nil return nil } func (r *reader) Read(p []byte) (int, error) { if len(p) == 0 { return 0, nil } r.mu.Lock() defer r.mu.Unlock() if r.r == len(r.buf) { if err := r.fetch(r.off + int64(r.r)); err != nil { return 0, err } } n := copy(p, r.buf[r.r:]) r.r += n return n, nil } func (r *reader) ReadAt(p []byte, off int64) (int, error) { if len(p) == 0 { return 0, nil } r.mu.Lock() defer r.mu.Unlock() // Convert relative offsets to absolute offsets. ab0 := r.off + int64(r.r) ab1 := r.off + int64(len(r.buf)) ap0 := off ap1 := off + int64(len(p)) // Check if we can satisfy the read entirely out of the existing buffer. if r.off <= ap0 && ap1 <= ab1 { // Convert off from an absolute offset to a relative offset. rp0 := int(ap0 - r.off) return copy(p, r.buf[rp0:]), nil } // Restore the original Read/Seek offset after ReadAt completes. defer r.seek(ab0) // Repeatedly fetch and copy until we have filled p. n := 0 for len(p) > 0 { if err := r.fetch(off + int64(n)); err != nil { return n, err } r.r = copy(p, r.buf) n += r.r p = p[r.r:] } return n, nil } func (r *reader) Seek(offset int64, whence int) (ret int64, err error) { r.mu.Lock() defer r.mu.Unlock() switch whence { case os.SEEK_SET: ret = offset case os.SEEK_CUR: ret = r.off + int64(r.r) + offset case os.SEEK_END: return 0, errors.New("seeking relative to the end of a blob isn't supported") default: return 0, fmt.Errorf("invalid Seek whence value: %d", whence) } if ret < 0 { return 0, errors.New("negative Seek offset") } return r.seek(ret) } // fetch fetches readBufferSize bytes starting at the given offset. On success, // the data is saved as r.buf. func (r *reader) fetch(off int64) error { req := &blobpb.FetchDataRequest{ BlobKey: proto.String(string(r.blobKey)), StartIndex: proto.Int64(off), EndIndex: proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive. } res := &blobpb.FetchDataResponse{} if err := internal.Call(r.c, "blobstore", "FetchData", req, res); err != nil { return err } if len(res.Data) == 0 { return io.EOF } r.buf, r.r, r.off = res.Data, 0, off return nil } // seek seeks to the given offset with an effective whence equal to SEEK_SET. // It discards the read buffer if the invariant cannot be maintained. func (r *reader) seek(off int64) (int64, error) { delta := off - r.off if delta >= 0 && delta < int64(len(r.buf)) { r.r = int(delta) return off, nil } r.buf, r.r, r.off = nil, 0, off return off, nil } ================================================ FILE: vendor/google.golang.org/appengine/capability/capability.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package capability exposes information about outages and scheduled downtime for specific API capabilities. This package does not work on Managed VMs. Example: if !capability.Enabled(c, "datastore_v3", "write") { // show user a different page } */ package capability import ( "golang.org/x/net/context" "google.golang.org/appengine/internal" "google.golang.org/appengine/log" pb "google.golang.org/appengine/internal/capability" ) // Enabled returns whether an API's capabilities are enabled. // The wildcard "*" capability matches every capability of an API. 
// If the underlying RPC fails (if the package is unknown, for example), // false is returned and information is written to the application log. func Enabled(ctx context.Context, api, capability string) bool { req := &pb.IsEnabledRequest{ Package: &api, Capability: []string{capability}, } res := &pb.IsEnabledResponse{} if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil { log.Warningf(ctx, "capability.Enabled: RPC failed: %v", err) return false } switch *res.SummaryStatus { case pb.IsEnabledResponse_ENABLED, pb.IsEnabledResponse_SCHEDULED_FUTURE, pb.IsEnabledResponse_SCHEDULED_NOW: return true case pb.IsEnabledResponse_UNKNOWN: log.Errorf(ctx, "capability.Enabled: unknown API capability %s/%s", api, capability) return false default: return false } } ================================================ FILE: vendor/google.golang.org/appengine/channel/channel.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package channel implements the server side of App Engine's Channel API. Create creates a new channel associated with the given clientID, which must be unique to the client that will use the returned token. token, err := channel.Create(c, "player1") if err != nil { // handle error } // return token to the client in an HTTP response Send sends a message to the client over the channel identified by clientID. channel.Send(c, "player1", "Game over!") */ package channel import ( "encoding/json" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/channel" ) // Create creates a channel and returns a token for use by the client. // The clientID is an application-provided string used to identify the client. func Create(c context.Context, clientID string) (token string, err error) { req := &pb.CreateChannelRequest{ ApplicationKey: &clientID, } resp := &pb.CreateChannelResponse{} err = internal.Call(c, service, "CreateChannel", req, resp) token = resp.GetToken() return token, remapError(err) } // Send sends a message on the channel associated with clientID. func Send(c context.Context, clientID, message string) error { req := &pb.SendMessageRequest{ ApplicationKey: &clientID, Message: &message, } resp := &basepb.VoidProto{} return remapError(internal.Call(c, service, "SendChannelMessage", req, resp)) } // SendJSON is a helper function that sends a JSON-encoded value // on the channel associated with clientID. func SendJSON(c context.Context, clientID string, value interface{}) error { m, err := json.Marshal(value) if err != nil { return err } return Send(c, clientID, string(m)) } // remapError fixes any APIError referencing "xmpp" into one referencing "channel". func remapError(err error) error { if e, ok := err.(*internal.APIError); ok { if e.Service == "xmpp" { e.Service = "channel" } } return err } var service = "xmpp" // prod func init() { if appengine.IsDevAppServer() { service = "channel" // dev } internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/cloudsql/cloudsql.go ================================================ // Copyright 2013 Google Inc. All rights reserved. 
// Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package cloudsql exposes access to Google Cloud SQL databases. This package does not work on Managed VMs. This package is intended for MySQL drivers to make App Engine-specific connections. Applications should use this package through database/sql: Select a pure Go MySQL driver that supports this package, and use sql.Open with protocol "cloudsql" and an address of the Cloud SQL instance. A Go MySQL driver that has been tested to work well with Cloud SQL is the go-sql-driver: import "database/sql" import _ "github.com/go-sql-driver/mysql" db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname") Another driver that works well with Cloud SQL is the mymysql driver: import "database/sql" import _ "github.com/ziutek/mymysql/godrv" db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password") Using either of these drivers, you can perform a standard SQL query. This example assumes there is a table named 'users' with columns 'first_name' and 'last_name': rows, err := db.Query("SELECT first_name, last_name FROM users") if err != nil { log.Errorf(ctx, "db.Query: %v", err) } defer rows.Close() for rows.Next() { var firstName string var lastName string if err := rows.Scan(&firstName, &lastName); err != nil { log.Errorf(ctx, "rows.Scan: %v", err) continue } log.Infof(ctx, "First: %v - Last: %v", firstName, lastName) } if err := rows.Err(); err != nil { log.Errorf(ctx, "Row error: %v", err) } */ package cloudsql import ( "net" ) // Dial connects to the named Cloud SQL instance. func Dial(instance string) (net.Conn, error) { return connect(instance) } ================================================ FILE: vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go ================================================ // Copyright 2013 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build appengine package cloudsql import ( "net" "appengine/cloudsql" ) func connect(instance string) (net.Conn, error) { return cloudsql.Dial(instance) } ================================================ FILE: vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go ================================================ // Copyright 2013 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build !appengine package cloudsql import ( "errors" "net" ) func connect(instance string) (net.Conn, error) { return nil, errors.New("cloudsql: not supported in Managed VMs") } ================================================ FILE: vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Program aebundler turns a Go app into a fully self-contained tar file. // The app and its subdirectories (if any) are placed under "." // and the dependencies from $GOPATH are placed under ./_gopath/src. // A main func is synthesized if one does not exist. // // A sample Dockerfile to be used with this bundler could look like this: // FROM gcr.io/google_appengine/go-compat // ADD . 
/app // RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe package main import ( "archive/tar" "flag" "fmt" "go/ast" "go/build" "go/parser" "go/token" "io" "io/ioutil" "os" "path/filepath" "strings" ) var ( output = flag.String("o", "", "name of output tar file or '-' for stdout") rootDir = flag.String("root", ".", "directory name of application root") vm = flag.Bool("vm", true, "bundle a Managed VM app") skipFiles = map[string]bool{ ".git": true, ".gitconfig": true, ".hg": true, ".travis.yml": true, } ) const ( newMain = `package main import "google.golang.org/appengine" func main() { appengine.Main() } ` ) func usage() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) fmt.Fprintf(os.Stderr, "\t%s -o \tBundle app to named tar file or stdout\n", os.Args[0]) fmt.Fprintf(os.Stderr, "\noptional arguments:\n") flag.PrintDefaults() } func main() { flag.Usage = usage flag.Parse() var tags []string if *vm { tags = append(tags, "appenginevm") } else { tags = append(tags, "appengine") } tarFile := *output if tarFile == "" { usage() errorf("Required -o flag not specified.") } app, err := analyze(tags) if err != nil { errorf("Error analyzing app: %v", err) } if err := app.bundle(tarFile); err != nil { errorf("Unable to bundle app: %v", err) } } // errorf prints the error message and exits. func errorf(format string, a ...interface{}) { fmt.Fprintf(os.Stderr, "aebundler: "+format+"\n", a...) os.Exit(1) } type app struct { hasMain bool appFiles []string imports map[string]string } // analyze checks the app for building with the given build tags and returns hasMain, // app files, and a map of full directory import names to original import names. func analyze(tags []string) (*app, error) { ctxt := buildContext(tags) hasMain, appFiles, err := checkMain(ctxt) if err != nil { return nil, err } gopath := filepath.SplitList(ctxt.GOPATH) im, err := imports(ctxt, *rootDir, gopath) return &app{ hasMain: hasMain, appFiles: appFiles, imports: im, }, err } // buildContext returns the context for building the source. func buildContext(tags []string) *build.Context { return &build.Context{ GOARCH: build.Default.GOARCH, GOOS: build.Default.GOOS, GOROOT: build.Default.GOROOT, GOPATH: build.Default.GOPATH, Compiler: build.Default.Compiler, BuildTags: append(build.Default.BuildTags, tags...), } } // bundle bundles the app into the named tarFile ("-"==stdout). func (s *app) bundle(tarFile string) (err error) { var out io.Writer if tarFile == "-" { out = os.Stdout } else { f, err := os.Create(tarFile) if err != nil { return err } defer func() { if cerr := f.Close(); err == nil { err = cerr } }() out = f } tw := tar.NewWriter(out) for srcDir, importName := range s.imports { dstDir := "_gopath/src/" + importName if err = copyTree(tw, dstDir, srcDir); err != nil { return fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err) } } if err := copyTree(tw, ".", *rootDir); err != nil { return fmt.Errorf("unable to copy root directory to /app: %v", err) } if !s.hasMain { if err := synthesizeMain(tw, s.appFiles); err != nil { return fmt.Errorf("unable to synthesize new main func: %v", err) } } if err := tw.Close(); err != nil { return fmt.Errorf("unable to close tar file %v: %v", tarFile, err) } return nil } // synthesizeMain generates a new main func and writes it to the tarball. 
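// The generated file is named app_mainN.go, with N chosen so that it does not
// collide with an existing app file.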
func synthesizeMain(tw *tar.Writer, appFiles []string) error { appMap := make(map[string]bool) for _, f := range appFiles { appMap[f] = true } var f string for i := 0; i < 100; i++ { f = fmt.Sprintf("app_main%d.go", i) if !appMap[filepath.Join(*rootDir, f)] { break } } if appMap[filepath.Join(*rootDir, f)] { return fmt.Errorf("unable to find unique name for %v", f) } hdr := &tar.Header{ Name: f, Mode: 0644, Size: int64(len(newMain)), } if err := tw.WriteHeader(hdr); err != nil { return fmt.Errorf("unable to write header for %v: %v", f, err) } if _, err := tw.Write([]byte(newMain)); err != nil { return fmt.Errorf("unable to write %v to tar file: %v", f, err) } return nil } // imports returns a map of all import directories (recursively) used by the app. // The return value maps full directory names to original import names. func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) { pkg, err := ctxt.ImportDir(srcDir, 0) if err != nil { return nil, fmt.Errorf("unable to analyze source: %v", err) } // Resolve all non-standard-library imports result := make(map[string]string) for _, v := range pkg.Imports { if !strings.Contains(v, ".") { continue } src, err := findInGopath(v, gopath) if err != nil { return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err) } result[src] = v im, err := imports(ctxt, src, gopath) if err != nil { return nil, fmt.Errorf("unable to parse package %v: %v", src, err) } for k, v := range im { result[k] = v } } return result, nil } // findInGopath searches the gopath for the named import directory. func findInGopath(dir string, gopath []string) (string, error) { for _, v := range gopath { dst := filepath.Join(v, "src", dir) if _, err := os.Stat(dst); err == nil { return dst, nil } } return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath) } // copyTree copies srcDir to tar file dstDir, ignoring skipFiles. func copyTree(tw *tar.Writer, dstDir, srcDir string) error { entries, err := ioutil.ReadDir(srcDir) if err != nil { return fmt.Errorf("unable to read dir %v: %v", srcDir, err) } for _, entry := range entries { n := entry.Name() if skipFiles[n] { continue } s := filepath.Join(srcDir, n) d := filepath.Join(dstDir, n) if entry.IsDir() { if err := copyTree(tw, d, s); err != nil { return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err) } continue } if err := copyFile(tw, d, s); err != nil { return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err) } } return nil } // copyFile copies src to tar file dst. func copyFile(tw *tar.Writer, dst, src string) error { s, err := os.Open(src) if err != nil { return fmt.Errorf("unable to open %v: %v", src, err) } defer s.Close() fi, err := s.Stat() if err != nil { return fmt.Errorf("unable to stat %v: %v", src, err) } hdr, err := tar.FileInfoHeader(fi, dst) if err != nil { return fmt.Errorf("unable to create tar header for %v: %v", dst, err) } hdr.Name = dst if err := tw.WriteHeader(hdr); err != nil { return fmt.Errorf("unable to write header for %v: %v", dst, err) } _, err = io.Copy(tw, s) if err != nil { return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err) } return nil } // checkMain verifies that there is a single "main" function. // It also returns a list of all Go source files in the app. 
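// The returned bool reports whether any of those files already declares a main function.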
func checkMain(ctxt *build.Context) (bool, []string, error) { pkg, err := ctxt.ImportDir(*rootDir, 0) if err != nil { return false, nil, fmt.Errorf("unable to analyze source: %v", err) } if !pkg.IsCommand() { errorf("Your app's package needs to be changed from %q to \"main\".\n", pkg.Name) } // Search for a "func main" var hasMain bool var appFiles []string for _, f := range pkg.GoFiles { n := filepath.Join(*rootDir, f) appFiles = append(appFiles, n) if hasMain, err = readFile(n); err != nil { return false, nil, fmt.Errorf("error parsing %q: %v", n, err) } } return hasMain, appFiles, nil } // isMain returns whether the given function declaration is a main function. // Such a function must be called "main", not have a receiver, and have no arguments or return types. func isMain(f *ast.FuncDecl) bool { ft := f.Type return f.Name.Name == "main" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0 } // readFile reads and parses the Go source code file and returns whether it has a main function. func readFile(filename string) (hasMain bool, err error) { var src []byte src, err = ioutil.ReadFile(filename) if err != nil { return } fset := token.NewFileSet() file, err := parser.ParseFile(fset, filename, src, 0) for _, decl := range file.Decls { funcDecl, ok := decl.(*ast.FuncDecl) if !ok { continue } if !isMain(funcDecl) { continue } hasMain = true break } return } ================================================ FILE: vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Program aedeploy assists with deploying Go Managed VM apps to production. // A temporary directory is created; the app, its subdirectories, and all its // dependencies from $GOPATH are copied into the directory; then the app // is deployed to production with the provided command. // // The app must be in "package main". // // This command must be issued from within the root directory of the app // (where the app.yaml file is located). package main import ( "flag" "fmt" "go/build" "io" "io/ioutil" "os" "os/exec" "path/filepath" "strings" ) var ( skipFiles = map[string]bool{ ".git": true, ".gitconfig": true, ".hg": true, ".travis.yml": true, } gopathCache = map[string]string{} ) func usage() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) fmt.Fprintf(os.Stderr, "\t%s gcloud --verbosity debug preview app deploy --version myversion ./app.yaml\tDeploy app to production\n", os.Args[0]) } func main() { flag.Usage = usage flag.Parse() if flag.NArg() < 1 { usage() os.Exit(1) } if err := aedeploy(); err != nil { fmt.Fprintf(os.Stderr, os.Args[0]+": Error: %v\n", err) os.Exit(1) } } func aedeploy() error { tags := []string{"appenginevm"} app, err := analyze(tags) if err != nil { return err } tmpDir, err := app.bundle() if tmpDir != "" { defer os.RemoveAll(tmpDir) } if err != nil { return err } if err := os.Chdir(tmpDir); err != nil { return fmt.Errorf("unable to chdir to %v: %v", tmpDir, err) } return deploy() } // deploy calls the provided command to deploy the app from the temporary directory. func deploy() error { cmd := exec.Command(flag.Arg(0), flag.Args()[1:]...) 
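// Wire the deploy command's standard streams to ours so that its prompts and
// progress output remain visible.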
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr if err := cmd.Run(); err != nil { return fmt.Errorf("unable to run %q: %v", strings.Join(flag.Args(), " "), err) } return nil } type app struct { appFiles []string imports map[string]string } // analyze checks the app for building with the given build tags and returns // app files, and a map of full directory import names to original import names. func analyze(tags []string) (*app, error) { ctxt := buildContext(tags) appFiles, err := appFiles(ctxt) if err != nil { return nil, err } gopath := filepath.SplitList(ctxt.GOPATH) im, err := imports(ctxt, ".", gopath) return &app{ appFiles: appFiles, imports: im, }, err } // buildContext returns the context for building the source. func buildContext(tags []string) *build.Context { return &build.Context{ GOARCH: "amd64", GOOS: "linux", GOROOT: build.Default.GOROOT, GOPATH: build.Default.GOPATH, Compiler: build.Default.Compiler, BuildTags: append(build.Default.BuildTags, tags...), } } // bundle bundles the app into a temporary directory. func (s *app) bundle() (tmpdir string, err error) { workDir, err := ioutil.TempDir("", "aedeploy") if err != nil { return "", fmt.Errorf("unable to create tmpdir: %v", err) } for srcDir, importName := range s.imports { dstDir := "_gopath/src/" + importName if err := copyTree(workDir, dstDir, srcDir); err != nil { return workDir, fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err) } } if err := copyTree(workDir, ".", "."); err != nil { return workDir, fmt.Errorf("unable to copy root directory to /app: %v", err) } return workDir, nil } // imports returns a map of all import directories (recursively) used by the app. // The return value maps full directory names to original import names. func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) { pkg, err := ctxt.ImportDir(srcDir, 0) if err != nil { return nil, err } // Resolve all non-standard-library imports result := make(map[string]string) for _, v := range pkg.Imports { if !strings.Contains(v, ".") { continue } src, err := findInGopath(v, gopath) if err != nil { return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err) } if _, ok := result[src]; ok { // Already processed continue } result[src] = v im, err := imports(ctxt, src, gopath) if err != nil { return nil, fmt.Errorf("unable to parse package %v: %v", src, err) } for k, v := range im { result[k] = v } } return result, nil } // findInGopath searches the gopath for the named import directory. func findInGopath(dir string, gopath []string) (string, error) { if v, ok := gopathCache[dir]; ok { return v, nil } for _, v := range gopath { dst := filepath.Join(v, "src", dir) if _, err := os.Stat(dst); err == nil { gopathCache[dir] = dst return dst, nil } } return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath) } // copyTree copies srcDir to dstDir relative to dstRoot, ignoring skipFiles. 
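// Symbolic links are resolved via os.Stat, so a symlinked file or directory is
// copied as a regular entry rather than as a link.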
func copyTree(dstRoot, dstDir, srcDir string) error { d := filepath.Join(dstRoot, dstDir) if err := os.MkdirAll(d, 0755); err != nil { return fmt.Errorf("unable to create directory %q: %v", d, err) } entries, err := ioutil.ReadDir(srcDir) if err != nil { return fmt.Errorf("unable to read dir %q: %v", srcDir, err) } for _, entry := range entries { n := entry.Name() if skipFiles[n] { continue } s := filepath.Join(srcDir, n) if entry.Mode()&os.ModeSymlink == os.ModeSymlink { if entry, err = os.Stat(s); err != nil { return fmt.Errorf("unable to stat %v: %v", s, err) } } d := filepath.Join(dstDir, n) if entry.IsDir() { if err := copyTree(dstRoot, d, s); err != nil { return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err) } continue } if err := copyFile(dstRoot, d, s); err != nil { return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err) } } return nil } // copyFile copies src to dst relative to dstRoot. func copyFile(dstRoot, dst, src string) error { s, err := os.Open(src) if err != nil { return fmt.Errorf("unable to open %q: %v", src, err) } defer s.Close() dst = filepath.Join(dstRoot, dst) d, err := os.Create(dst) if err != nil { return fmt.Errorf("unable to create %q: %v", dst, err) } _, err = io.Copy(d, s) if err != nil { d.Close() // ignore error, copy already failed. return fmt.Errorf("unable to copy %q to %q: %v", src, dst, err) } if err := d.Close(); err != nil { return fmt.Errorf("unable to close %q: %v", dst, err) } return nil } // appFiles returns a list of all Go source files in the app. func appFiles(ctxt *build.Context) ([]string, error) { pkg, err := ctxt.ImportDir(".", 0) if err != nil { return nil, err } if !pkg.IsCommand() { return nil, fmt.Errorf(`the root of your app needs to be package "main" (currently %q). Please see https://cloud.google.com/appengine/docs/go/managed-vms for more details on structuring your app.`, pkg.Name) } var appFiles []string for _, f := range pkg.GoFiles { n := filepath.Join(".", f) appFiles = append(appFiles, n) } return appFiles, nil } ================================================ FILE: vendor/google.golang.org/appengine/datastore/datastore.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package datastore import ( "errors" "fmt" "reflect" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" ) var ( // ErrInvalidEntityType is returned when functions like Get or Next are // passed a dst or src argument of invalid type. ErrInvalidEntityType = errors.New("datastore: invalid entity type") // ErrInvalidKey is returned when an invalid key is presented. ErrInvalidKey = errors.New("datastore: invalid key") // ErrNoSuchEntity is returned when no entity was found for a given key. ErrNoSuchEntity = errors.New("datastore: no such entity") ) // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. // StructType is the type of the struct pointed to by the destination argument // passed to Get or to Iterator.Next. 
type ErrFieldMismatch struct { StructType reflect.Type FieldName string Reason string } func (e *ErrFieldMismatch) Error() string { return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", e.FieldName, e.StructType, e.Reason) } // protoToKey converts a Reference proto to a *Key. func protoToKey(r *pb.Reference) (k *Key, err error) { appID := r.GetApp() namespace := r.GetNameSpace() for _, e := range r.Path.Element { k = &Key{ kind: e.GetType(), stringID: e.GetName(), intID: e.GetId(), parent: k, appID: appID, namespace: namespace, } if !k.valid() { return nil, ErrInvalidKey } } return } // keyToProto converts a *Key to a Reference proto. func keyToProto(defaultAppID string, k *Key) *pb.Reference { appID := k.appID if appID == "" { appID = defaultAppID } n := 0 for i := k; i != nil; i = i.parent { n++ } e := make([]*pb.Path_Element, n) for i := k; i != nil; i = i.parent { n-- e[n] = &pb.Path_Element{ Type: &i.kind, } // At most one of {Name,Id} should be set. // Neither will be set for incomplete keys. if i.stringID != "" { e[n].Name = &i.stringID } else if i.intID != 0 { e[n].Id = &i.intID } } var namespace *string if k.namespace != "" { namespace = proto.String(k.namespace) } return &pb.Reference{ App: proto.String(appID), NameSpace: namespace, Path: &pb.Path{ Element: e, }, } } // multiKeyToProto is a batch version of keyToProto. func multiKeyToProto(appID string, key []*Key) []*pb.Reference { ret := make([]*pb.Reference, len(key)) for i, k := range key { ret[i] = keyToProto(appID, k) } return ret } // multiValid is a batch version of Key.valid. It returns an error, not a // []bool. func multiValid(key []*Key) error { invalid := false for _, k := range key { if !k.valid() { invalid = true break } } if !invalid { return nil } err := make(appengine.MultiError, len(key)) for i, k := range key { if !k.valid() { err[i] = ErrInvalidKey } } return err } // It's unfortunate that the two semantically equivalent concepts pb.Reference // and pb.PropertyValue_ReferenceValue aren't the same type. For example, the // two have different protobuf field numbers. // referenceValueToKey is the same as protoToKey except the input is a // PropertyValue_ReferenceValue instead of a Reference. func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) { appID := r.GetApp() namespace := r.GetNameSpace() for _, e := range r.Pathelement { k = &Key{ kind: e.GetType(), stringID: e.GetName(), intID: e.GetId(), parent: k, appID: appID, namespace: namespace, } if !k.valid() { return nil, ErrInvalidKey } } return } // keyToReferenceValue is the same as keyToProto except the output is a // PropertyValue_ReferenceValue instead of a Reference. func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue { ref := keyToProto(defaultAppID, k) pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element)) for i, e := range ref.Path.Element { pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{ Type: e.Type, Id: e.Id, Name: e.Name, } } return &pb.PropertyValue_ReferenceValue{ App: ref.App, NameSpace: ref.NameSpace, Pathelement: pe, } } type multiArgType int const ( multiArgTypeInvalid multiArgType = iota multiArgTypePropertyLoadSaver multiArgTypeStruct multiArgTypeStructPtr multiArgTypeInterface ) // checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct // type S, for some interface type I, or some non-interface non-pointer type P // such that P or *P implements PropertyLoadSaver. 
// // It returns what category the slice's elements are, and the reflect.Type // that represents S, I or P. // // As a special case, PropertyList is an invalid type for v. func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { if v.Kind() != reflect.Slice { return multiArgTypeInvalid, nil } if v.Type() == typeOfPropertyList { return multiArgTypeInvalid, nil } elemType = v.Type().Elem() if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { return multiArgTypePropertyLoadSaver, elemType } switch elemType.Kind() { case reflect.Struct: return multiArgTypeStruct, elemType case reflect.Interface: return multiArgTypeInterface, elemType case reflect.Ptr: elemType = elemType.Elem() if elemType.Kind() == reflect.Struct { return multiArgTypeStructPtr, elemType } } return multiArgTypeInvalid, nil } // Get loads the entity stored for k into dst, which must be a struct pointer // or implement PropertyLoadSaver. If there is no such entity for the key, Get // returns ErrNoSuchEntity. // // The values of dst's unmatched struct fields are not modified, and matching // slice-typed fields are not reset before appending to them. In particular, it // is recommended to pass a pointer to a zero valued struct on each Get call. // // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. ErrFieldMismatch is only returned if // dst is a struct pointer. func Get(c context.Context, key *Key, dst interface{}) error { if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here return ErrInvalidEntityType } err := GetMulti(c, []*Key{key}, []interface{}{dst}) if me, ok := err.(appengine.MultiError); ok { return me[0] } return err } // GetMulti is a batch version of Get. // // dst must be a []S, []*S, []I or []P, for some struct type S, some interface // type I, or some non-interface non-pointer type P such that P or *P // implements PropertyLoadSaver. If an []I, each element must be a valid dst // for Get: it must be a struct pointer or implement PropertyLoadSaver. // // As a special case, PropertyList is an invalid type for dst, even though a // PropertyList is a slice of structs. It is treated as invalid to avoid being // mistakenly passed when []PropertyList was intended. 
func GetMulti(c context.Context, key []*Key, dst interface{}) error { v := reflect.ValueOf(dst) multiArgType, _ := checkMultiArg(v) if multiArgType == multiArgTypeInvalid { return errors.New("datastore: dst has invalid type") } if len(key) != v.Len() { return errors.New("datastore: key and dst slices have different length") } if len(key) == 0 { return nil } if err := multiValid(key); err != nil { return err } req := &pb.GetRequest{ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), } res := &pb.GetResponse{} if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil { return err } if len(key) != len(res.Entity) { return errors.New("datastore: internal error: server returned the wrong number of entities") } multiErr, any := make(appengine.MultiError, len(key)), false for i, e := range res.Entity { if e.Entity == nil { multiErr[i] = ErrNoSuchEntity } else { elem := v.Index(i) if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem = elem.Addr() } if multiArgType == multiArgTypeStructPtr && elem.IsNil() { elem.Set(reflect.New(elem.Type().Elem())) } multiErr[i] = loadEntity(elem.Interface(), e.Entity) } if multiErr[i] != nil { any = true } } if any { return multiErr } return nil } // Put saves the entity src into the datastore with key k. src must be a struct // pointer or implement PropertyLoadSaver; if a struct pointer then any // unexported fields of that struct will be skipped. If k is an incomplete key, // the returned key will be a unique key generated by the datastore. func Put(c context.Context, key *Key, src interface{}) (*Key, error) { k, err := PutMulti(c, []*Key{key}, []interface{}{src}) if err != nil { if me, ok := err.(appengine.MultiError); ok { return nil, me[0] } return nil, err } return k[0], nil } // PutMulti is a batch version of Put. // // src must satisfy the same conditions as the dst argument to GetMulti. func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) { v := reflect.ValueOf(src) multiArgType, _ := checkMultiArg(v) if multiArgType == multiArgTypeInvalid { return nil, errors.New("datastore: src has invalid type") } if len(key) != v.Len() { return nil, errors.New("datastore: key and src slices have different length") } if len(key) == 0 { return nil, nil } appID := internal.FullyQualifiedAppID(c) if err := multiValid(key); err != nil { return nil, err } req := &pb.PutRequest{} for i := range key { elem := v.Index(i) if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem = elem.Addr() } sProto, err := saveEntity(appID, key[i], elem.Interface()) if err != nil { return nil, err } req.Entity = append(req.Entity, sProto) } res := &pb.PutResponse{} if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil { return nil, err } if len(key) != len(res.Key) { return nil, errors.New("datastore: internal error: server returned the wrong number of keys") } ret := make([]*Key, len(key)) for i := range ret { var err error ret[i], err = protoToKey(res.Key[i]) if err != nil || ret[i].Incomplete() { return nil, errors.New("datastore: internal error: server returned an invalid key") } } return ret, nil } // Delete deletes the entity for the given key. func Delete(c context.Context, key *Key) error { err := DeleteMulti(c, []*Key{key}) if me, ok := err.(appengine.MultiError); ok { return me[0] } return err } // DeleteMulti is a batch version of Delete. 
func DeleteMulti(c context.Context, key []*Key) error { if len(key) == 0 { return nil } if err := multiValid(key); err != nil { return err } req := &pb.DeleteRequest{ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), } res := &pb.DeleteResponse{} return internal.Call(c, "datastore_v3", "Delete", req, res) } func namespaceMod(m proto.Message, namespace string) { // pb.Query is the only type that has a name_space field. // All other namespace support in datastore is in the keys. switch m := m.(type) { case *pb.Query: if m.NameSpace == nil { m.NameSpace = &namespace } } } func init() { internal.NamespaceMods["datastore_v3"] = namespaceMod internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name) internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT)) } ================================================ FILE: vendor/google.golang.org/appengine/datastore/doc.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package datastore provides a client for App Engine's datastore service. Basic Operations Entities are the unit of storage and are associated with a key. A key consists of an optional parent key, a string application ID, a string kind (also known as an entity type), and either a StringID or an IntID. A StringID is also known as an entity name or key name. It is valid to create a key with a zero StringID and a zero IntID; this is called an incomplete key, and does not refer to any saved entity. Putting an entity into the datastore under an incomplete key will cause a unique key to be generated for that entity, with a non-zero IntID. An entity's contents are a mapping from case-sensitive field names to values. Valid value types are: - signed integers (int, int8, int16, int32 and int64), - bool, - string, - float32 and float64, - []byte (up to 1 megabyte in length), - any type whose underlying type is one of the above predeclared types, - ByteString, - *Key, - time.Time (stored with microsecond precision), - appengine.BlobKey, - appengine.GeoPoint, - structs whose fields are all valid value types, - slices of any of the above. Slices of structs are valid, as are structs that contain slices. However, if one struct contains another, then at most one of those can be repeated. This disqualifies recursively defined struct types: any struct T that (directly or indirectly) contains a []T. The Get and Put functions load and save an entity's contents. An entity's contents are typically represented by a struct pointer. Example code: type Entity struct { Value string } func handle(w http.ResponseWriter, r *http.Request) { ctx := appengine.NewContext(r) k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil) e := new(Entity) if err := datastore.Get(ctx, k, e); err != nil { http.Error(w, err.Error(), 500) return } old := e.Value e.Value = r.URL.Path if _, err := datastore.Put(ctx, k, e); err != nil { http.Error(w, err.Error(), 500) return } w.Header().Set("Content-Type", "text/plain; charset=utf-8") fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value) } GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and Delete functions. They take a []*Key instead of a *Key, and may return an appengine.MultiError when encountering partial failure. Properties An entity's contents can be represented by a variety of types. 
These are typically struct pointers, but can also be any type that implements the PropertyLoadSaver interface. If using a struct pointer, you do not have to explicitly implement the PropertyLoadSaver interface; the datastore will automatically convert via reflection. If a struct pointer does implement that interface then those methods will be used in preference to the default behavior for struct pointers. Struct pointers are more strongly typed and are easier to use; PropertyLoadSavers are more flexible. The actual types passed do not have to match between Get and Put calls or even across different App Engine requests. It is valid to put a *PropertyList and get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. Conceptually, any entity is saved as a sequence of properties, and is loaded into the destination value on a property-by-property basis. When loading into a struct pointer, an entity that cannot be completely represented (such as a missing field) will result in an ErrFieldMismatch error but it is up to the caller whether this error is fatal, recoverable or ignorable. By default, for struct pointers, all properties are potentially indexed, and the property name is the same as the field name (and hence must start with an upper case letter). Fields may have a `datastore:"name,options"` tag. The tag name is the property name, which must be one or more valid Go identifiers joined by ".", but may start with a lower case letter. An empty tag name means to just use the field name. A "-" tag name means that the datastore will ignore that field. If options is "noindex" then the field will not be indexed. If the options is "" then the comma may be omitted. There are no other recognized options. Fields (except for []byte) are indexed by default. Strings longer than 1500 bytes cannot be indexed; fields used to store long strings should be tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be indexed. Example code: // A and B are renamed to a and b. // A, C and J are not indexed. // D's tag is equivalent to having no tag at all (E). // I is ignored entirely by the datastore. // J has tag information for both the datastore and json packages. type TaggedStruct struct { A int `datastore:"a,noindex"` B int `datastore:"b"` C int `datastore:",noindex"` D int `datastore:""` E int I int `datastore:"-"` J int `datastore:",noindex" json:"j"` } Structured Properties If the struct pointed to contains other structs, then the nested or embedded structs are flattened. For example, given these definitions: type Inner1 struct { W int32 X string } type Inner2 struct { Y float64 } type Inner3 struct { Z bool } type Outer struct { A int16 I []Inner1 J Inner2 Inner3 } then an Outer's properties would be equivalent to those of: type OuterEquivalent struct { A int16 IDotW []int32 `datastore:"I.W"` IDotX []string `datastore:"I.X"` JDotY float64 `datastore:"J.Y"` Z bool } If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`. If an outer struct is tagged "noindex" then all of its implicit flattened fields are effectively "noindex". The PropertyLoadSaver Interface An entity's contents can also be represented by any type that implements the PropertyLoadSaver interface. This type may be a struct pointer, but it does not have to be. The datastore package will call Load when getting the entity's contents, and Save when putting the entity's contents. 
Possible uses include deriving non-stored fields, verifying fields, or indexing a field only if its value is positive. Example code: type CustomPropsExample struct { I, J int // Sum is not stored, but should always be equal to I + J. Sum int `datastore:"-"` } func (x *CustomPropsExample) Load(ps []datastore.Property) error { // Load I and J as usual. if err := datastore.LoadStruct(x, ps); err != nil { return err } // Derive the Sum field. x.Sum = x.I + x.J return nil } func (x *CustomPropsExample) Save() ([]datastore.Property, error) { // Validate the Sum field. if x.Sum != x.I + x.J { return errors.New("CustomPropsExample has inconsistent sum") } // Save I and J as usual. The code below is equivalent to calling // "return datastore.SaveStruct(x)", but is done manually for // demonstration purposes. return []datastore.Property{ { Name: "I", Value: int64(x.I), }, { Name: "J", Value: int64(x.J), }, } } The *PropertyList type implements PropertyLoadSaver, and can therefore hold an arbitrary entity's contents. Queries Queries retrieve entities based on their properties or key's ancestry. Running a query yields an iterator of results: either keys or (key, entity) pairs. Queries are re-usable and it is safe to call Query.Run from concurrent goroutines. Iterators are not safe for concurrent use. Queries are immutable, and are either created by calling NewQuery, or derived from an existing query by calling a method like Filter or Order that returns a new query value. A query is typically constructed by calling NewQuery followed by a chain of zero or more such methods. These methods are: - Ancestor and Filter constrain the entities returned by running a query. - Order affects the order in which they are returned. - Project constrains the fields returned. - Distinct de-duplicates projected entities. - KeysOnly makes the iterator return only keys, not (key, entity) pairs. - Start, End, Offset and Limit define which sub-sequence of matching entities to return. Start and End take cursors, Offset and Limit take integers. Start and Offset affect the first result, End and Limit affect the last result. If both Start and Offset are set, then the offset is relative to Start. If both End and Limit are set, then the earliest constraint wins. Limit is relative to Start+Offset, not relative to End. As a special case, a negative limit means unlimited. Example code: type Widget struct { Description string Price int } func handle(w http.ResponseWriter, r *http.Request) { ctx := appengine.NewContext(r) q := datastore.NewQuery("Widget"). Filter("Price <", 1000). Order("-Price") b := new(bytes.Buffer) for t := q.Run(ctx); ; { var x Widget key, err := t.Next(&x) if err == datastore.Done { break } if err != nil { serveError(ctx, w, err) return } fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x) } w.Header().Set("Content-Type", "text/plain; charset=utf-8") io.Copy(w, b) } Transactions RunInTransaction runs a function in a transaction. 
Example code: type Counter struct { Count int } func inc(ctx context.Context, key *datastore.Key) (int, error) { var x Counter if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity { return 0, err } x.Count++ if _, err := datastore.Put(ctx, key, &x); err != nil { return 0, err } return x.Count, nil } func handle(w http.ResponseWriter, r *http.Request) { ctx := appengine.NewContext(r) var count int err := datastore.RunInTransaction(ctx, func(ctx context.Context) error { var err1 error count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil)) return err1 }, nil) if err != nil { serveError(ctx, w, err) return } w.Header().Set("Content-Type", "text/plain; charset=utf-8") fmt.Fprintf(w, "Count=%d", count) } Metadata The datastore package provides access to some of App Engine's datastore metadata. This metadata includes information about the entity groups, namespaces, entity kinds, and properties in the datastore, as well as the property representations for each property. Example code: func handle(w http.ResponseWriter, r *http.Request) { // Print all the kinds in the datastore, with all the indexed // properties (and their representations) for each. ctx := appengine.NewContext(r) kinds, err := datastore.Kinds(ctx) if err != nil { serveError(ctx, w, err) return } w.Header().Set("Content-Type", "text/plain; charset=utf-8") for _, kind := range kinds { fmt.Fprintf(w, "%s:\n", kind) props, err := datastore.KindProperties(ctx, kind) if err != nil { fmt.Fprintln(w, "\t(unable to retrieve properties)") continue } for p, rep := range props { fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", ")) } } } */ package datastore ================================================ FILE: vendor/google.golang.org/appengine/datastore/key.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package datastore import ( "bytes" "encoding/base64" "encoding/gob" "errors" "fmt" "strconv" "strings" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" ) // Key represents the datastore key for a stored entity, and is immutable. type Key struct { kind string stringID string intID int64 parent *Key appID string namespace string } // Kind returns the key's kind (also known as entity type). func (k *Key) Kind() string { return k.kind } // StringID returns the key's string ID (also known as an entity name or key // name), which may be "". func (k *Key) StringID() string { return k.stringID } // IntID returns the key's integer ID, which may be 0. func (k *Key) IntID() int64 { return k.intID } // Parent returns the key's parent key, which may be nil. func (k *Key) Parent() *Key { return k.parent } // AppID returns the key's application ID. func (k *Key) AppID() string { return k.appID } // Namespace returns the key's namespace. func (k *Key) Namespace() string { return k.namespace } // Incomplete returns whether the key does not refer to a stored entity. // In particular, whether the key has a zero StringID and a zero IntID. func (k *Key) Incomplete() bool { return k.stringID == "" && k.intID == 0 } // valid returns whether the key is valid.
func (k *Key) valid() bool { if k == nil { return false } for ; k != nil; k = k.parent { if k.kind == "" || k.appID == "" { return false } if k.stringID != "" && k.intID != 0 { return false } if k.parent != nil { if k.parent.Incomplete() { return false } if k.parent.appID != k.appID || k.parent.namespace != k.namespace { return false } } } return true } // Equal returns whether two keys are equal. func (k *Key) Equal(o *Key) bool { for k != nil && o != nil { if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace { return false } k, o = k.parent, o.parent } return k == o } // root returns the furthest ancestor of a key, which may be itself. func (k *Key) root() *Key { for k.parent != nil { k = k.parent } return k } // marshal marshals the key's string representation to the buffer. func (k *Key) marshal(b *bytes.Buffer) { if k.parent != nil { k.parent.marshal(b) } b.WriteByte('/') b.WriteString(k.kind) b.WriteByte(',') if k.stringID != "" { b.WriteString(k.stringID) } else { b.WriteString(strconv.FormatInt(k.intID, 10)) } } // String returns a string representation of the key. func (k *Key) String() string { if k == nil { return "" } b := bytes.NewBuffer(make([]byte, 0, 512)) k.marshal(b) return b.String() } type gobKey struct { Kind string StringID string IntID int64 Parent *gobKey AppID string Namespace string } func keyToGobKey(k *Key) *gobKey { if k == nil { return nil } return &gobKey{ Kind: k.kind, StringID: k.stringID, IntID: k.intID, Parent: keyToGobKey(k.parent), AppID: k.appID, Namespace: k.namespace, } } func gobKeyToKey(gk *gobKey) *Key { if gk == nil { return nil } return &Key{ kind: gk.Kind, stringID: gk.StringID, intID: gk.IntID, parent: gobKeyToKey(gk.Parent), appID: gk.AppID, namespace: gk.Namespace, } } func (k *Key) GobEncode() ([]byte, error) { buf := new(bytes.Buffer) if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { return nil, err } return buf.Bytes(), nil } func (k *Key) GobDecode(buf []byte) error { gk := new(gobKey) if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { return err } *k = *gobKeyToKey(gk) return nil } func (k *Key) MarshalJSON() ([]byte, error) { return []byte(`"` + k.Encode() + `"`), nil } func (k *Key) UnmarshalJSON(buf []byte) error { if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { return errors.New("datastore: bad JSON key") } k2, err := DecodeKey(string(buf[1 : len(buf)-1])) if err != nil { return err } *k = *k2 return nil } // Encode returns an opaque representation of the key // suitable for use in HTML and URLs. // This is compatible with the Python and Java runtimes. func (k *Key) Encode() string { ref := keyToProto("", k) b, err := proto.Marshal(ref) if err != nil { panic(err) } // Trailing padding is stripped. return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } // DecodeKey decodes a key from the opaque representation returned by Encode. func DecodeKey(encoded string) (*Key, error) { // Re-add padding. if m := len(encoded) % 4; m != 0 { encoded += strings.Repeat("=", 4-m) } b, err := base64.URLEncoding.DecodeString(encoded) if err != nil { return nil, err } ref := new(pb.Reference) if err := proto.Unmarshal(b, ref); err != nil { return nil, err } return protoToKey(ref) } // NewIncompleteKey creates a new incomplete key. // kind cannot be empty. func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key { return NewKey(c, kind, "", 0, parent) } // NewKey creates a new key. // kind cannot be empty. 
// Either one or both of stringID and intID must be zero. If both are zero, // the key returned is incomplete. // parent must either be a complete key or nil. func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key { // If there's a parent key, use its namespace. // Otherwise, use any namespace attached to the context. var namespace string if parent != nil { namespace = parent.namespace } else { namespace = internal.NamespaceFromContext(c) } return &Key{ kind: kind, stringID: stringID, intID: intID, parent: parent, appID: internal.FullyQualifiedAppID(c), namespace: namespace, } } // AllocateIDs returns a range of n integer IDs with the given kind and parent // combination. kind cannot be empty; parent may be nil. The IDs in the range // returned will not be used by the datastore's automatic ID sequence generator // and may be used with NewKey without conflict. // // The range is inclusive at the low end and exclusive at the high end. In // other words, valid intIDs x satisfy low <= x && x < high. // // If no error is returned, low + n == high. func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) { if kind == "" { return 0, 0, errors.New("datastore: AllocateIDs given an empty kind") } if n < 0 { return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n) } if n == 0 { return 0, 0, nil } req := &pb.AllocateIdsRequest{ ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)), Size: proto.Int64(int64(n)), } res := &pb.AllocateIdsResponse{} if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil { return 0, 0, err } // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops) // is inclusive at the low end and exclusive at the high end, so we add 1. low = res.GetStart() high = res.GetEnd() + 1 if low+int64(n) != high { return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n) } return low, high, nil } ================================================ FILE: vendor/google.golang.org/appengine/datastore/load.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package datastore import ( "fmt" "reflect" "time" "google.golang.org/appengine" pb "google.golang.org/appengine/internal/datastore" ) var ( typeOfBlobKey = reflect.TypeOf(appengine.BlobKey("")) typeOfByteSlice = reflect.TypeOf([]byte(nil)) typeOfByteString = reflect.TypeOf(ByteString(nil)) typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{}) typeOfTime = reflect.TypeOf(time.Time{}) ) // typeMismatchReason returns a string explaining why the property p could not // be stored in an entity field of type v.Type(). func typeMismatchReason(p Property, v reflect.Value) string { entityType := "empty" switch p.Value.(type) { case int64: entityType = "int" case bool: entityType = "bool" case string: entityType = "string" case float64: entityType = "float" case *Key: entityType = "*datastore.Key" case time.Time: entityType = "time.Time" case appengine.BlobKey: entityType = "appengine.BlobKey" case appengine.GeoPoint: entityType = "appengine.GeoPoint" case ByteString: entityType = "datastore.ByteString" case []byte: entityType = "[]byte" } return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) } type propertyLoader struct { // m holds the number of times a substruct field like "Foo.Bar.Baz" has // been seen so far. 
The map is constructed lazily. m map[string]int } func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string { var v reflect.Value // Traverse a struct's struct-typed fields. for name := p.Name; ; { decoder, ok := codec.byName[name] if !ok { return "no such struct field" } v = structValue.Field(decoder.index) if !v.IsValid() { return "no such struct field" } if !v.CanSet() { return "cannot set struct field" } if decoder.substructCodec == nil { break } if v.Kind() == reflect.Slice { if l.m == nil { l.m = make(map[string]int) } index := l.m[p.Name] l.m[p.Name] = index + 1 for v.Len() <= index { v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) } structValue = v.Index(index) requireSlice = false } else { structValue = v } // Strip the "I." from "I.X". name = name[len(codec.byIndex[decoder.index].name):] codec = decoder.substructCodec } var slice reflect.Value if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { slice = v v = reflect.New(v.Type().Elem()).Elem() } else if requireSlice { return "multiple-valued property requires a slice field type" } // Convert indexValues to a Go value with a meaning derived from the // destination type. pValue := p.Value if iv, ok := pValue.(indexValue); ok { meaning := pb.Property_NO_MEANING switch v.Type() { case typeOfBlobKey: meaning = pb.Property_BLOBKEY case typeOfByteSlice: meaning = pb.Property_BLOB case typeOfByteString: meaning = pb.Property_BYTESTRING case typeOfGeoPoint: meaning = pb.Property_GEORSS_POINT case typeOfTime: meaning = pb.Property_GD_WHEN } var err error pValue, err = propValue(iv.value, meaning) if err != nil { return err.Error() } } switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: x, ok := pValue.(int64) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.OverflowInt(x) { return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) } v.SetInt(x) case reflect.Bool: x, ok := pValue.(bool) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.SetBool(x) case reflect.String: switch x := pValue.(type) { case appengine.BlobKey: v.SetString(string(x)) case ByteString: v.SetString(string(x)) case string: v.SetString(x) default: if pValue != nil { return typeMismatchReason(p, v) } } case reflect.Float32, reflect.Float64: x, ok := pValue.(float64) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.OverflowFloat(x) { return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) } v.SetFloat(x) case reflect.Ptr: x, ok := pValue.(*Key) if !ok && pValue != nil { return typeMismatchReason(p, v) } if _, ok := v.Interface().(*Key); !ok { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) case reflect.Struct: switch v.Type() { case typeOfTime: x, ok := pValue.(time.Time) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) case typeOfGeoPoint: x, ok := pValue.(appengine.GeoPoint) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) default: return typeMismatchReason(p, v) } case reflect.Slice: x, ok := pValue.([]byte) if !ok { if y, yok := pValue.(ByteString); yok { x, ok = []byte(y), true } } if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.Type().Elem().Kind() != reflect.Uint8 { return typeMismatchReason(p, v) } v.SetBytes(x) default: return typeMismatchReason(p, v) } if slice.IsValid() { slice.Set(reflect.Append(slice, v)) } return "" } // loadEntity loads an 
EntityProto into PropertyLoadSaver or struct pointer. func loadEntity(dst interface{}, src *pb.EntityProto) (err error) { props, err := protoToProperties(src) if err != nil { return err } if e, ok := dst.(PropertyLoadSaver); ok { return e.Load(props) } return LoadStruct(dst, props) } func (s structPLS) Load(props []Property) error { var fieldName, reason string var l propertyLoader for _, p := range props { if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" { // We don't return early, as we try to load as many properties as possible. // It is valid to load an entity into a struct that cannot fully represent it. // That case returns an error, but the caller is free to ignore it. fieldName, reason = p.Name, errStr } } if reason != "" { return &ErrFieldMismatch{ StructType: s.v.Type(), FieldName: fieldName, Reason: reason, } } return nil } func protoToProperties(src *pb.EntityProto) ([]Property, error) { props, rawProps := src.Property, src.RawProperty out := make([]Property, 0, len(props)+len(rawProps)) for { var ( x *pb.Property noIndex bool ) if len(props) > 0 { x, props = props[0], props[1:] } else if len(rawProps) > 0 { x, rawProps = rawProps[0], rawProps[1:] noIndex = true } else { break } var value interface{} if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE { value = indexValue{x.Value} } else { var err error value, err = propValue(x.Value, x.GetMeaning()) if err != nil { return nil, err } } out = append(out, Property{ Name: x.GetName(), Value: value, NoIndex: noIndex, Multiple: x.GetMultiple(), }) } return out, nil } // propValue returns a Go value that combines the raw PropertyValue with a // meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) { switch { case v.Int64Value != nil: if m == pb.Property_GD_WHEN { return fromUnixMicro(*v.Int64Value), nil } else { return *v.Int64Value, nil } case v.BooleanValue != nil: return *v.BooleanValue, nil case v.StringValue != nil: if m == pb.Property_BLOB { return []byte(*v.StringValue), nil } else if m == pb.Property_BLOBKEY { return appengine.BlobKey(*v.StringValue), nil } else if m == pb.Property_BYTESTRING { return ByteString(*v.StringValue), nil } else { return *v.StringValue, nil } case v.DoubleValue != nil: return *v.DoubleValue, nil case v.Referencevalue != nil: key, err := referenceValueToKey(v.Referencevalue) if err != nil { return nil, err } return key, nil case v.Pointvalue != nil: // NOTE: Strangely, latitude maps to X, longitude to Y. return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil } return nil, nil } // indexValue is a Property value that is created when entities are loaded from // an index, such as from a projection query. // // Such Property values do not contain all of the metadata required to be // faithfully represented as a Go value, and are instead represented as an // opaque indexValue. Load the properties into a concrete struct type (e.g. by // passing a struct pointer to Iterator.Next) to reconstruct actual Go values // of type int, string, time.Time, etc. type indexValue struct { value *pb.PropertyValue } ================================================ FILE: vendor/google.golang.org/appengine/datastore/metadata.go ================================================ // Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
package datastore import "golang.org/x/net/context" // Datastore kinds for the metadata entities. const ( namespaceKind = "__namespace__" kindKind = "__kind__" propertyKind = "__property__" entityGroupKind = "__entitygroup__" ) // Namespaces returns all the datastore namespaces. func Namespaces(ctx context.Context) ([]string, error) { // TODO(djd): Support range queries. q := NewQuery(namespaceKind).KeysOnly() keys, err := q.GetAll(ctx, nil) if err != nil { return nil, err } // The empty namespace key uses a numeric ID (==1), but luckily // the string ID defaults to "" for numeric IDs anyway. return keyNames(keys), nil } // Kinds returns the names of all the kinds in the current namespace. func Kinds(ctx context.Context) ([]string, error) { // TODO(djd): Support range queries. q := NewQuery(kindKind).KeysOnly() keys, err := q.GetAll(ctx, nil) if err != nil { return nil, err } return keyNames(keys), nil } // keyNames returns a slice of the provided keys' names (string IDs). func keyNames(keys []*Key) []string { n := make([]string, 0, len(keys)) for _, k := range keys { n = append(n, k.StringID()) } return n } // KindProperties returns all the indexed properties for the given kind. // The properties are returned as a map of property names to a slice of the // representation types. The representation types for the supported Go property // types are: // "INT64": signed integers and time.Time // "DOUBLE": float32 and float64 // "BOOLEAN": bool // "STRING": string, []byte and ByteString // "POINT": appengine.GeoPoint // "REFERENCE": *Key // "USER": (not used in the Go runtime) func KindProperties(ctx context.Context, kind string) (map[string][]string, error) { // TODO(djd): Support range queries. kindKey := NewKey(ctx, kindKind, kind, 0, nil) q := NewQuery(propertyKind).Ancestor(kindKey) propMap := map[string][]string{} props := []struct { Repr []string `datastore:"property_representation"` }{} keys, err := q.GetAll(ctx, &props) if err != nil { return nil, err } for i, p := range props { propMap[keys[i].StringID()] = p.Repr } return propMap, nil } ================================================ FILE: vendor/google.golang.org/appengine/datastore/prop.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package datastore import ( "fmt" "reflect" "strings" "sync" "unicode" ) // Entities with more than this many indexed properties will not be saved. const maxIndexedProperties = 20000 // []byte fields more than 1 megabyte long will not be loaded or saved. const maxBlobLen = 1 << 20 // Property is a name/value pair plus some metadata. A datastore entity's // contents are loaded and saved as a sequence of Properties. An entity can // have multiple Properties with the same name, provided that p.Multiple is // true on all of that entity's Properties with that name. type Property struct { // Name is the property name. Name string // Value is the property value. The valid types are: // - int64 // - bool // - string // - float64 // - ByteString // - *Key // - time.Time // - appengine.BlobKey // - appengine.GeoPoint // - []byte (up to 1 megabyte in length) // This set is smaller than the set of valid struct field types that the // datastore can load and save. A Property Value cannot be a slice (apart // from []byte); use multiple Properties instead.
Also, a Value's type // must be explicitly on the list above; it is not sufficient for the // underlying type to be on that list. For example, a Value of "type // myInt64 int64" is invalid. Smaller-width integers and floats are also // invalid. Again, this is more restrictive than the set of valid struct // field types. // // A Value will have an opaque type when loading entities from an index, // such as via a projection query. Load entities into a struct instead // of a PropertyLoadSaver when using a projection query. // // A Value may also be the nil interface value; this is equivalent to // Python's None but not directly representable by a Go struct. Loading // a nil-valued property into a struct will set that field to the zero // value. Value interface{} // NoIndex is whether the datastore cannot index this property. NoIndex bool // Multiple is whether the entity can have multiple properties with // the same name. Even if a particular instance only has one property with // a certain name, Multiple should be true if a struct would best represent // it as a field of type []T instead of type T. Multiple bool } // ByteString is a short byte slice (up to 1500 bytes) that can be indexed. type ByteString []byte // PropertyLoadSaver can be converted from and to a slice of Properties. type PropertyLoadSaver interface { Load([]Property) error Save() ([]Property, error) } // PropertyList converts a []Property to implement PropertyLoadSaver. type PropertyList []Property var ( typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) ) // Load loads all of the provided properties into l. // It does not first reset *l to an empty slice. func (l *PropertyList) Load(p []Property) error { *l = append(*l, p...) return nil } // Save saves all of l's properties as a slice of Properties. func (l *PropertyList) Save() ([]Property, error) { return *l, nil } // validPropertyName returns whether name consists of one or more valid Go // identifiers joined by ".". func validPropertyName(name string) bool { if name == "" { return false } for _, s := range strings.Split(name, ".") { if s == "" { return false } first := true for _, c := range s { if first { first = false if c != '_' && !unicode.IsLetter(c) { return false } } else { if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { return false } } } } return true } // structTag is the parsed `datastore:"name,options"` tag of a struct field. // If a field has no tag, or the tag has an empty name, then the structTag's // name is just the field name. A "-" name means that the datastore ignores // that field. type structTag struct { name string noIndex bool } // structCodec describes how to convert a struct to and from a sequence of // properties. type structCodec struct { // byIndex gives the structTag for the i'th field. byIndex []structTag // byName gives the field codec for the structTag with the given name. byName map[string]fieldCodec // hasSlice is whether a struct or any of its nested or embedded structs // has a slice-typed field (other than []byte). hasSlice bool // complete is whether the structCodec is complete. An incomplete // structCodec may be encountered when walking a recursive struct. complete bool } // fieldCodec is a struct field's index and, if that struct field's type is // itself a struct, that substruct's structCodec. type fieldCodec struct { index int substructCodec *structCodec } // structCodecs collects the structCodecs that have already been calculated.
var ( structCodecsMutex sync.Mutex structCodecs = make(map[reflect.Type]*structCodec) ) // getStructCodec returns the structCodec for the given struct type. func getStructCodec(t reflect.Type) (*structCodec, error) { structCodecsMutex.Lock() defer structCodecsMutex.Unlock() return getStructCodecLocked(t) } // getStructCodecLocked implements getStructCodec. The structCodecsMutex must // be held when calling this function. func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { c, ok := structCodecs[t] if ok { return c, nil } c = &structCodec{ byIndex: make([]structTag, t.NumField()), byName: make(map[string]fieldCodec), } // Add c to the structCodecs map before we are sure it is good. If t is // a recursive type, it needs to find the incomplete entry for itself in // the map. structCodecs[t] = c defer func() { if retErr != nil { delete(structCodecs, t) } }() for i := range c.byIndex { f := t.Field(i) name, opts := f.Tag.Get("datastore"), "" if i := strings.Index(name, ","); i != -1 { name, opts = name[:i], name[i+1:] } if name == "" { if !f.Anonymous { name = f.Name } } else if name == "-" { c.byIndex[i] = structTag{name: name} continue } else if !validPropertyName(name) { return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) } substructType, fIsSlice := reflect.Type(nil), false switch f.Type.Kind() { case reflect.Struct: substructType = f.Type case reflect.Slice: if f.Type.Elem().Kind() == reflect.Struct { substructType = f.Type.Elem() } fIsSlice = f.Type != typeOfByteSlice c.hasSlice = c.hasSlice || fIsSlice } if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint { if name != "" { name = name + "." } sub, err := getStructCodecLocked(substructType) if err != nil { return nil, err } if !sub.complete { return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) } if fIsSlice && sub.hasSlice { return nil, fmt.Errorf( "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) } c.hasSlice = c.hasSlice || sub.hasSlice for relName := range sub.byName { absName := name + relName if _, ok := c.byName[absName]; ok { return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName) } c.byName[absName] = fieldCodec{index: i, substructCodec: sub} } } else { if _, ok := c.byName[name]; ok { return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) } c.byName[name] = fieldCodec{index: i} } c.byIndex[i] = structTag{ name: name, noIndex: opts == "noindex", } } c.complete = true return c, nil } // structPLS adapts a struct to be a PropertyLoadSaver. type structPLS struct { v reflect.Value codec *structCodec } // newStructPLS returns a PropertyLoadSaver for the struct pointer p. func newStructPLS(p interface{}) (PropertyLoadSaver, error) { v := reflect.ValueOf(p) if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { return nil, ErrInvalidEntityType } v = v.Elem() codec, err := getStructCodec(v.Type()) if err != nil { return nil, err } return structPLS{v, codec}, nil } // LoadStruct loads the properties from p to dst. // dst must be a struct pointer. func LoadStruct(dst interface{}, p []Property) error { x, err := newStructPLS(dst) if err != nil { return err } return x.Load(p) } // SaveStruct returns the properties from src as a slice of Properties. // src must be a struct pointer. 
func SaveStruct(src interface{}) ([]Property, error) { x, err := newStructPLS(src) if err != nil { return nil, err } return x.Save() } ================================================ FILE: vendor/google.golang.org/appengine/datastore/query.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package datastore import ( "encoding/base64" "errors" "fmt" "math" "reflect" "strings" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" ) type operator int const ( lessThan operator = iota lessEq equal greaterEq greaterThan ) var operatorToProto = map[operator]*pb.Query_Filter_Operator{ lessThan: pb.Query_Filter_LESS_THAN.Enum(), lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(), equal: pb.Query_Filter_EQUAL.Enum(), greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(), greaterThan: pb.Query_Filter_GREATER_THAN.Enum(), } // filter is a conditional filter on query results. type filter struct { FieldName string Op operator Value interface{} } type sortDirection int const ( ascending sortDirection = iota descending ) var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{ ascending: pb.Query_Order_ASCENDING.Enum(), descending: pb.Query_Order_DESCENDING.Enum(), } // order is a sort order on query results. type order struct { FieldName string Direction sortDirection } // NewQuery creates a new Query for a specific entity kind. // // An empty kind means to return all entities, including entities created and // managed by other App Engine features, and is called a kindless query. // Kindless queries cannot include filters or sort orders on property values. func NewQuery(kind string) *Query { return &Query{ kind: kind, limit: -1, } } // Query represents a datastore query. type Query struct { kind string ancestor *Key filter []filter order []order projection []string distinct bool keysOnly bool eventual bool limit int32 offset int32 start *pb.CompiledCursor end *pb.CompiledCursor err error } func (q *Query) clone() *Query { x := *q // Copy the contents of the slice-typed fields to a new backing store. if len(q.filter) > 0 { x.filter = make([]filter, len(q.filter)) copy(x.filter, q.filter) } if len(q.order) > 0 { x.order = make([]order, len(q.order)) copy(x.order, q.order) } return &x } // Ancestor returns a derivative query with an ancestor filter. // The ancestor should not be nil. func (q *Query) Ancestor(ancestor *Key) *Query { q = q.clone() if ancestor == nil { q.err = errors.New("datastore: nil query ancestor") return q } q.ancestor = ancestor return q } // EventualConsistency returns a derivative query that returns eventually // consistent results. // It only has an effect on ancestor queries. func (q *Query) EventualConsistency() *Query { q = q.clone() q.eventual = true return q } // Filter returns a derivative query with a field-based filter. // The filterStr argument must be a field name followed by optional space, // followed by an operator, one of ">", "<", ">=", "<=", or "=". // Fields are compared against the provided value using the operator. // Multiple filters are AND'ed together. 
func (q *Query) Filter(filterStr string, value interface{}) *Query { q = q.clone() filterStr = strings.TrimSpace(filterStr) if len(filterStr) < 1 { q.err = errors.New("datastore: invalid filter: " + filterStr) return q } f := filter{ FieldName: strings.TrimRight(filterStr, " ><=!"), Value: value, } switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { case "<=": f.Op = lessEq case ">=": f.Op = greaterEq case "<": f.Op = lessThan case ">": f.Op = greaterThan case "=": f.Op = equal default: q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) return q } q.filter = append(q.filter, f) return q } // Order returns a derivative query with a field-based sort order. Orders are // applied in the order they are added. The default order is ascending; to sort // in descending order prefix the fieldName with a minus sign (-). func (q *Query) Order(fieldName string) *Query { q = q.clone() fieldName = strings.TrimSpace(fieldName) o := order{ Direction: ascending, FieldName: fieldName, } if strings.HasPrefix(fieldName, "-") { o.Direction = descending o.FieldName = strings.TrimSpace(fieldName[1:]) } else if strings.HasPrefix(fieldName, "+") { q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) return q } if len(o.FieldName) == 0 { q.err = errors.New("datastore: empty order") return q } q.order = append(q.order, o) return q } // Project returns a derivative query that yields only the given fields. It // cannot be used with KeysOnly. func (q *Query) Project(fieldNames ...string) *Query { q = q.clone() q.projection = append([]string(nil), fieldNames...) return q } // Distinct returns a derivative query that yields de-duplicated entities with // respect to the set of projected fields. It is only used for projection // queries. func (q *Query) Distinct() *Query { q = q.clone() q.distinct = true return q } // KeysOnly returns a derivative query that yields only keys, not keys and // entities. It cannot be used with projection queries. func (q *Query) KeysOnly() *Query { q = q.clone() q.keysOnly = true return q } // Limit returns a derivative query that has a limit on the number of results // returned. A negative value means unlimited. func (q *Query) Limit(limit int) *Query { q = q.clone() if limit < math.MinInt32 || limit > math.MaxInt32 { q.err = errors.New("datastore: query limit overflow") return q } q.limit = int32(limit) return q } // Offset returns a derivative query that has an offset of how many keys to // skip over before returning results. A negative value is invalid. func (q *Query) Offset(offset int) *Query { q = q.clone() if offset < 0 { q.err = errors.New("datastore: negative query offset") return q } if offset > math.MaxInt32 { q.err = errors.New("datastore: query offset overflow") return q } q.offset = int32(offset) return q } // Start returns a derivative query with the given start point. func (q *Query) Start(c Cursor) *Query { q = q.clone() if c.cc == nil { q.err = errors.New("datastore: invalid cursor") return q } q.start = c.cc return q } // End returns a derivative query with the given end point. func (q *Query) End(c Cursor) *Query { q = q.clone() if c.cc == nil { q.err = errors.New("datastore: invalid cursor") return q } q.end = c.cc return q } // toProto converts the query to a protocol buffer. 
func (q *Query) toProto(dst *pb.Query, appID string) error { if len(q.projection) != 0 && q.keysOnly { return errors.New("datastore: query cannot both project and be keys-only") } dst.Reset() dst.App = proto.String(appID) if q.kind != "" { dst.Kind = proto.String(q.kind) } if q.ancestor != nil { dst.Ancestor = keyToProto(appID, q.ancestor) if q.eventual { dst.Strong = proto.Bool(false) } } if q.projection != nil { dst.PropertyName = q.projection if q.distinct { dst.GroupByPropertyName = q.projection } } if q.keysOnly { dst.KeysOnly = proto.Bool(true) dst.RequirePerfectPlan = proto.Bool(true) } for _, qf := range q.filter { if qf.FieldName == "" { return errors.New("datastore: empty query filter field name") } p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false) if errStr != "" { return errors.New("datastore: bad query filter value type: " + errStr) } xf := &pb.Query_Filter{ Op: operatorToProto[qf.Op], Property: []*pb.Property{p}, } if xf.Op == nil { return errors.New("datastore: unknown query filter operator") } dst.Filter = append(dst.Filter, xf) } for _, qo := range q.order { if qo.FieldName == "" { return errors.New("datastore: empty query order field name") } xo := &pb.Query_Order{ Property: proto.String(qo.FieldName), Direction: sortDirectionToProto[qo.Direction], } if xo.Direction == nil { return errors.New("datastore: unknown query order direction") } dst.Order = append(dst.Order, xo) } if q.limit >= 0 { dst.Limit = proto.Int32(q.limit) } if q.offset != 0 { dst.Offset = proto.Int32(q.offset) } dst.CompiledCursor = q.start dst.EndCompiledCursor = q.end dst.Compile = proto.Bool(true) return nil } // Count returns the number of results for the query. func (q *Query) Count(c context.Context) (int, error) { // Check that the query is well-formed. if q.err != nil { return 0, q.err } // Run a copy of the query, with keysOnly true (if we're not a projection, // since the two are incompatible), and an adjusted offset. We also set the // limit to zero, as we don't want any actual entity data, just the number // of skipped results. newQ := q.clone() newQ.keysOnly = len(newQ.projection) == 0 newQ.limit = 0 if q.limit < 0 { // If the original query was unlimited, set the new query's offset to maximum. newQ.offset = math.MaxInt32 } else { newQ.offset = q.offset + q.limit if newQ.offset < 0 { // Do the best we can, in the presence of overflow. newQ.offset = math.MaxInt32 } } req := &pb.Query{} if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil { return 0, err } res := &pb.QueryResult{} if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil { return 0, err } // n is the count we will return. For example, suppose that our original // query had an offset of 4 and a limit of 2008: the count will be 2008, // provided that there are at least 2012 matching entities. However, the // RPCs will only skip 1000 results at a time. 
The RPC sequence is: // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset // response has (skippedResults, moreResults) = (1000, true) // n += 1000 // n == 1000 // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n // response has (skippedResults, moreResults) = (1000, true) // n += 1000 // n == 2000 // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n // response has (skippedResults, moreResults) = (12, false) // n += 12 // n == 2012 // // exit the loop // n -= 4 // n == 2008 var n int32 for { // The QueryResult should have no actual entity data, just skipped results. if len(res.Result) != 0 { return 0, errors.New("datastore: internal error: Count request returned too much data") } n += res.GetSkippedResults() if !res.GetMoreResults() { break } if err := callNext(c, res, newQ.offset-n, 0); err != nil { return 0, err } } n -= q.offset if n < 0 { // If the offset was greater than the number of matching entities, // return 0 instead of negative. n = 0 } return int(n), nil } // callNext issues a datastore_v3/Next RPC to advance a cursor, such as that // returned by a query with more results. func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error { if res.Cursor == nil { return errors.New("datastore: internal error: server did not return a cursor") } req := &pb.NextRequest{ Cursor: res.Cursor, } if limit >= 0 { req.Count = proto.Int32(limit) } if offset != 0 { req.Offset = proto.Int32(offset) } if res.CompiledCursor != nil { req.Compile = proto.Bool(true) } res.Reset() return internal.Call(c, "datastore_v3", "Next", req, res) } // GetAll runs the query in the given context and returns all keys that match // that query, as well as appending the values to dst. // // dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- // interface, non-pointer type P such that P or *P implements PropertyLoadSaver. // // As a special case, *PropertyList is an invalid type for dst, even though a // PropertyList is a slice of structs. It is treated as invalid to avoid being // mistakenly passed when *[]PropertyList was intended. // // The keys returned by GetAll will be in a 1-1 correspondence with the entities // added to dst. // // If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) { var ( dv reflect.Value mat multiArgType elemType reflect.Type errFieldMismatch error ) if !q.keysOnly { dv = reflect.ValueOf(dst) if dv.Kind() != reflect.Ptr || dv.IsNil() { return nil, ErrInvalidEntityType } dv = dv.Elem() mat, elemType = checkMultiArg(dv) if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { return nil, ErrInvalidEntityType } } var keys []*Key for t := q.Run(c); ; { k, e, err := t.next() if err == Done { break } if err != nil { return keys, err } if !q.keysOnly { ev := reflect.New(elemType) if elemType.Kind() == reflect.Map { // This is a special case. The zero values of a map type are // not immediately useful; they have to be make'd. // // Funcs and channels are similar, in that a zero value is not useful, // but even a freshly make'd channel isn't useful: there's no fixed // channel buffer size that is always going to be large enough, and // there's no goroutine to drain the other end. Theoretically, these // types could be supported, for example by sniffing for a constructor // method or requiring prior registration, but for now it's not a // frequent enough concern to be worth it. 
Programmers can work around // it by explicitly using Iterator.Next instead of the Query.GetAll // convenience method. x := reflect.MakeMap(elemType) ev.Elem().Set(x) } if err = loadEntity(ev.Interface(), e); err != nil { if _, ok := err.(*ErrFieldMismatch); ok { // We continue loading entities even in the face of field mismatch errors. // If we encounter any other error, that other error is returned. Otherwise, // an ErrFieldMismatch is returned. errFieldMismatch = err } else { return keys, err } } if mat != multiArgTypeStructPtr { ev = ev.Elem() } dv.Set(reflect.Append(dv, ev)) } keys = append(keys, k) } return keys, errFieldMismatch } // Run runs the query in the given context. func (q *Query) Run(c context.Context) *Iterator { if q.err != nil { return &Iterator{err: q.err} } t := &Iterator{ c: c, limit: q.limit, q: q, prevCC: q.start, } var req pb.Query if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil { t.err = err return t } if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil { t.err = err return t } offset := q.offset - t.res.GetSkippedResults() for offset > 0 && t.res.GetMoreResults() { t.prevCC = t.res.CompiledCursor if err := callNext(t.c, &t.res, offset, t.limit); err != nil { t.err = err break } skip := t.res.GetSkippedResults() if skip < 0 { t.err = errors.New("datastore: internal error: negative number of skipped_results") break } offset -= skip } if offset < 0 { t.err = errors.New("datastore: internal error: query offset was overshot") } return t } // Iterator is the result of running a query. type Iterator struct { c context.Context err error // res is the result of the most recent RunQuery or Next API call. res pb.QueryResult // i is how many elements of res.Result we have iterated over. i int // limit is the limit on the number of results this iterator should return. // A negative value means unlimited. limit int32 // q is the original query which yielded this iterator. q *Query // prevCC is the compiled cursor that marks the end of the previous batch // of results. prevCC *pb.CompiledCursor } // Done is returned when a query iteration has completed. var Done = errors.New("datastore: query has no more results") // Next returns the key of the next result. When there are no more results, // Done is returned as the error. // // If the query is not keys only and dst is non-nil, it also loads the entity // stored for that key into the struct pointer or PropertyLoadSaver dst, with // the same semantics and possible errors as for the Get function. func (t *Iterator) Next(dst interface{}) (*Key, error) { k, e, err := t.next() if err != nil { return nil, err } if dst != nil && !t.q.keysOnly { err = loadEntity(dst, e) } return k, err } func (t *Iterator) next() (*Key, *pb.EntityProto, error) { if t.err != nil { return nil, nil, t.err } // Issue datastore_v3/Next RPCs as necessary. for t.i == len(t.res.Result) { if !t.res.GetMoreResults() { t.err = Done return nil, nil, t.err } t.prevCC = t.res.CompiledCursor if err := callNext(t.c, &t.res, 0, t.limit); err != nil { t.err = err return nil, nil, t.err } if t.res.GetSkippedResults() != 0 { t.err = errors.New("datastore: internal error: iterator has skipped results") return nil, nil, t.err } t.i = 0 if t.limit >= 0 { t.limit -= int32(len(t.res.Result)) if t.limit < 0 { t.err = errors.New("datastore: internal error: query returned more results than the limit") return nil, nil, t.err } } } // Extract the key from the t.i'th element of t.res.Result. 
e := t.res.Result[t.i] t.i++ if e.Key == nil { return nil, nil, errors.New("datastore: internal error: server did not return a key") } k, err := protoToKey(e.Key) if err != nil || k.Incomplete() { return nil, nil, errors.New("datastore: internal error: server returned an invalid key") } return k, e, nil } // Cursor returns a cursor for the iterator's current location. func (t *Iterator) Cursor() (Cursor, error) { if t.err != nil && t.err != Done { return Cursor{}, t.err } // If we are at either end of the current batch of results, // return the compiled cursor at that end. skipped := t.res.GetSkippedResults() if t.i == 0 && skipped == 0 { if t.prevCC == nil { // A nil pointer (of type *pb.CompiledCursor) means no constraint: // passing it as the end cursor of a new query means unlimited results // (glossing over the integer limit parameter for now). // A non-nil pointer to an empty pb.CompiledCursor means the start: // passing it as the end cursor of a new query means 0 results. // If prevCC was nil, then the original query had no start cursor, but // Iterator.Cursor should return "the start" instead of unlimited. return Cursor{&zeroCC}, nil } return Cursor{t.prevCC}, nil } if t.i == len(t.res.Result) { return Cursor{t.res.CompiledCursor}, nil } // Otherwise, re-run the query offset to this iterator's position, starting from // the most recent compiled cursor. This is done on a best-effort basis, as it // is racy; if a concurrent process has added or removed entities, then the // cursor returned may be inconsistent. q := t.q.clone() q.start = t.prevCC q.offset = skipped + int32(t.i) q.limit = 0 q.keysOnly = len(q.projection) == 0 t1 := q.Run(t.c) _, _, err := t1.next() if err != Done { if err == nil { err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results") } return Cursor{}, err } return Cursor{t1.res.CompiledCursor}, nil } var zeroCC pb.CompiledCursor // Cursor is an iterator's position. It can be converted to and from an opaque // string. A cursor can be used from different HTTP requests, but only with a // query with the same kind, ancestor, filter and order constraints. type Cursor struct { cc *pb.CompiledCursor } // String returns a base-64 string representation of a cursor. func (c Cursor) String() string { if c.cc == nil { return "" } b, err := proto.Marshal(c.cc) if err != nil { // The only way to construct a Cursor with a non-nil cc field is to // unmarshal from the byte representation. We panic if the unmarshal // succeeds but the marshaling of the unchanged protobuf value fails. panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err)) } return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } // Decode decodes a cursor from its base-64 string representation. func DecodeCursor(s string) (Cursor, error) { if s == "" { return Cursor{&zeroCC}, nil } if n := len(s) % 4; n != 0 { s += strings.Repeat("=", 4-n) } b, err := base64.URLEncoding.DecodeString(s) if err != nil { return Cursor{}, err } cc := &pb.CompiledCursor{} if err := proto.Unmarshal(b, cc); err != nil { return Cursor{}, err } return Cursor{cc}, nil } ================================================ FILE: vendor/google.golang.org/appengine/datastore/save.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
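// Illustrative sketch of the Cursor round trip provided by query.go above
// (cur and q are hypothetical values: a Cursor previously obtained from
// Iterator.Cursor, and a *Query being resumed in a later request):
//
//	s := cur.String()
//	cur2, err := DecodeCursor(s)
//	if err == nil {
//		q = q.Start(cur2)
//	}
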
package datastore import ( "errors" "fmt" "math" "reflect" "time" "github.com/golang/protobuf/proto" "google.golang.org/appengine" pb "google.golang.org/appengine/internal/datastore" ) func toUnixMicro(t time.Time) int64 { // We cannot use t.UnixNano() / 1e3 because we want to handle times more than // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot // be represented in the numerator of a single int64 divide. return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) } func fromUnixMicro(t int64) time.Time { return time.Unix(t/1e6, (t%1e6)*1e3) } var ( minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) ) // valueToProto converts a named value to a newly allocated Property. // The returned error string is empty on success. func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) { var ( pv pb.PropertyValue unsupported bool ) switch v.Kind() { case reflect.Invalid: // No-op. case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: pv.Int64Value = proto.Int64(v.Int()) case reflect.Bool: pv.BooleanValue = proto.Bool(v.Bool()) case reflect.String: pv.StringValue = proto.String(v.String()) case reflect.Float32, reflect.Float64: pv.DoubleValue = proto.Float64(v.Float()) case reflect.Ptr: if k, ok := v.Interface().(*Key); ok { if k != nil { pv.Referencevalue = keyToReferenceValue(defaultAppID, k) } } else { unsupported = true } case reflect.Struct: switch t := v.Interface().(type) { case time.Time: if t.Before(minTime) || t.After(maxTime) { return nil, "time value out of range" } pv.Int64Value = proto.Int64(toUnixMicro(t)) case appengine.GeoPoint: if !t.Valid() { return nil, "invalid GeoPoint value" } // NOTE: Strangely, latitude maps to X, longitude to Y. pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng} default: unsupported = true } case reflect.Slice: if b, ok := v.Interface().([]byte); ok { pv.StringValue = proto.String(string(b)) } else { // nvToProto should already catch slice values. // If we get here, we have a slice of slice values. unsupported = true } default: unsupported = true } if unsupported { return nil, "unsupported datastore value type: " + v.Type().String() } p = &pb.Property{ Name: proto.String(name), Value: &pv, Multiple: proto.Bool(multiple), } if v.IsValid() { switch v.Interface().(type) { case []byte: p.Meaning = pb.Property_BLOB.Enum() case ByteString: p.Meaning = pb.Property_BYTESTRING.Enum() case appengine.BlobKey: p.Meaning = pb.Property_BLOBKEY.Enum() case time.Time: p.Meaning = pb.Property_GD_WHEN.Enum() case appengine.GeoPoint: p.Meaning = pb.Property_GEORSS_POINT.Enum() } } return p, "" } // saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. 
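//
// As an illustrative sketch (Greeting is a hypothetical entity type, not part
// of this package), the struct fields handled by the save path below are
// typically declared with `datastore` tags that control indexing, and can be
// converted to properties directly with SaveStruct:
//
//	type Greeting struct {
//		Author  string
//		Content string `datastore:",noindex"`
//		Date    time.Time
//	}
//
//	props, err := SaveStruct(&Greeting{Author: "gopher", Date: time.Now()})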
func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) { var err error var props []Property if e, ok := src.(PropertyLoadSaver); ok { props, err = e.Save() } else { props, err = SaveStruct(src) } if err != nil { return nil, err } return propertiesToProto(defaultAppID, key, props) } func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error { p := Property{ Name: name, NoIndex: noIndex, Multiple: multiple, } switch x := v.Interface().(type) { case *Key: p.Value = x case time.Time: p.Value = x case appengine.BlobKey: p.Value = x case appengine.GeoPoint: p.Value = x case ByteString: p.Value = x default: switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: p.Value = v.Int() case reflect.Bool: p.Value = v.Bool() case reflect.String: p.Value = v.String() case reflect.Float32, reflect.Float64: p.Value = v.Float() case reflect.Slice: if v.Type().Elem().Kind() == reflect.Uint8 { p.NoIndex = true p.Value = v.Bytes() } case reflect.Struct: if !v.CanAddr() { return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") } sub, err := newStructPLS(v.Addr().Interface()) if err != nil { return fmt.Errorf("datastore: unsupported struct field: %v", err) } return sub.(structPLS).save(props, name, noIndex, multiple) } } if p.Value == nil { return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) } *props = append(*props, p) return nil } func (s structPLS) Save() ([]Property, error) { var props []Property if err := s.save(&props, "", false, false); err != nil { return nil, err } return props, nil } func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error { for i, t := range s.codec.byIndex { if t.name == "-" { continue } name := t.name if prefix != "" { name = prefix + name } v := s.v.Field(i) if !v.IsValid() || !v.CanSet() { continue } noIndex1 := noIndex || t.noIndex // For slice fields that aren't []byte, save each element. if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { for j := 0; j < v.Len(); j++ { if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil { return err } } continue } // Otherwise, save the field itself. 
if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil { return err } } return nil } func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) { e := &pb.EntityProto{ Key: keyToProto(defaultAppID, key), } if key.parent == nil { e.EntityGroup = &pb.Path{} } else { e.EntityGroup = keyToProto(defaultAppID, key.root()).Path } prevMultiple := make(map[string]bool) for _, p := range props { if pm, ok := prevMultiple[p.Name]; ok { if !pm || !p.Multiple { return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name) } } else { prevMultiple[p.Name] = p.Multiple } x := &pb.Property{ Name: proto.String(p.Name), Value: new(pb.PropertyValue), Multiple: proto.Bool(p.Multiple), } switch v := p.Value.(type) { case int64: x.Value.Int64Value = proto.Int64(v) case bool: x.Value.BooleanValue = proto.Bool(v) case string: x.Value.StringValue = proto.String(v) if p.NoIndex { x.Meaning = pb.Property_TEXT.Enum() } case float64: x.Value.DoubleValue = proto.Float64(v) case *Key: if v != nil { x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v) } case time.Time: if v.Before(minTime) || v.After(maxTime) { return nil, fmt.Errorf("datastore: time value out of range") } x.Value.Int64Value = proto.Int64(toUnixMicro(v)) x.Meaning = pb.Property_GD_WHEN.Enum() case appengine.BlobKey: x.Value.StringValue = proto.String(string(v)) x.Meaning = pb.Property_BLOBKEY.Enum() case appengine.GeoPoint: if !v.Valid() { return nil, fmt.Errorf("datastore: invalid GeoPoint value") } // NOTE: Strangely, latitude maps to X, longitude to Y. x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng} x.Meaning = pb.Property_GEORSS_POINT.Enum() case []byte: x.Value.StringValue = proto.String(string(v)) x.Meaning = pb.Property_BLOB.Enum() if !p.NoIndex { return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name) } case ByteString: x.Value.StringValue = proto.String(string(v)) x.Meaning = pb.Property_BYTESTRING.Enum() default: if p.Value != nil { return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name) } } if p.NoIndex { e.RawProperty = append(e.RawProperty, x) } else { e.Property = append(e.Property, x) if len(e.Property) > maxIndexedProperties { return nil, errors.New("datastore: too many indexed properties") } } } return e, nil } ================================================ FILE: vendor/google.golang.org/appengine/datastore/transaction.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package datastore import ( "errors" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" ) func init() { internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) { x.Transaction = t }) internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) { x.Transaction = t }) internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) { x.Transaction = t }) internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) { x.Transaction = t }) } // ErrConcurrentTransaction is returned when a transaction is rolled back due // to a conflict with a concurrent transaction. var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") // RunInTransaction runs f in a transaction. 
It calls f with a transaction // context tc that f should use for all App Engine operations. // // If f returns nil, RunInTransaction attempts to commit the transaction, // returning nil if it succeeds. If the commit fails due to a conflicting // transaction, RunInTransaction retries f, each time with a new transaction // context. It gives up and returns ErrConcurrentTransaction after three // failed attempts. The number of attempts can be configured by specifying // TransactionOptions.Attempts. // // If f returns non-nil, then any datastore changes will not be applied and // RunInTransaction returns that same error. The function f is not retried. // // Note that when f returns, the transaction is not yet committed. Calling code // must be careful not to assume that any of f's changes have been committed // until RunInTransaction returns nil. // // Since f may be called multiple times, f should usually be idempotent. // datastore.Get is not idempotent when unmarshaling slice fields. // // Nested transactions are not supported; c may not be a transaction context. func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error { xg := false if opts != nil { xg = opts.XG } attempts := 3 if opts != nil && opts.Attempts > 0 { attempts = opts.Attempts } for i := 0; i < attempts; i++ { if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction { return err } } return ErrConcurrentTransaction } // TransactionOptions are the options for running a transaction. type TransactionOptions struct { // XG is whether the transaction can cross multiple entity groups. In // comparison, a single group transaction is one where all datastore keys // used have the same root key. Note that cross group transactions do not // have the same behavior as single group transactions. In particular, it // is much more likely to see partially applied transactions in different // entity groups, in global queries. // It is valid to set XG to true even if the transaction is within a // single entity group. XG bool // Attempts controls the number of retries to perform when commits fail // due to a conflicting transaction. If omitted, it defaults to 3. Attempts int } ================================================ FILE: vendor/google.golang.org/appengine/delay/delay.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package delay provides a way to execute code outside the scope of a user request by using the taskqueue API. To declare a function that may be executed later, call Func in a top-level assignment context, passing it an arbitrary string key and a function whose first argument is of type context.Context. var laterFunc = delay.Func("key", myFunc) It is also possible to use a function literal. var laterFunc = delay.Func("key", func(c context.Context, x string) { // ... }) To call a function, invoke its Call method. laterFunc.Call(c, "something") A function may be called any number of times. If the function has any return arguments, and the last one is of type error, the function may return a non-nil error to signal that the function should be retried. The arguments to functions may be of any type that is encodable by the gob package. 
If an argument is of interface type, it is the client's responsibility to register with the gob package whatever concrete type may be passed for that argument; see http://golang.org/pkg/gob/#Register for details. Any errors during initialization or execution of a function will be logged to the application logs. Error logs that occur during initialization will be associated with the request that invoked the Call method. The state of a function invocation that has not yet successfully executed is preserved by combining the file name in which it is declared with the string key that was passed to the Func function. Updating an app with pending function invocations is safe as long as the relevant functions have the (filename, key) combination preserved. The delay package uses the Task Queue API to create tasks that call the reserved application path "/_ah/queue/go/delay". This path must not be marked as "login: required" in app.yaml; it must be marked as "login: admin" or have no access restriction. */ package delay import ( "bytes" "encoding/gob" "errors" "fmt" "net/http" "reflect" "runtime" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/log" "google.golang.org/appengine/taskqueue" ) // Function represents a function that may have a delayed invocation. type Function struct { fv reflect.Value // Kind() == reflect.Func key string err error // any error during initialization } const ( // The HTTP path for invocations. path = "/_ah/queue/go/delay" // Use the default queue. queue = "" ) var ( // registry of all delayed functions funcs = make(map[string]*Function) // precomputed types contextType = reflect.TypeOf((*context.Context)(nil)).Elem() errorType = reflect.TypeOf((*error)(nil)).Elem() // errors errFirstArg = errors.New("first argument must be context.Context") ) // Func declares a new Function. The second argument must be a function with a // first argument of type context.Context. // This function must be called at program initialization time. That means it // must be called in a global variable declaration or from an init function. // This restriction is necessary because the instance that delays a function // call may not be the one that executes it. Only the code executed at program // initialization time is guaranteed to have been run by an instance before it // receives a request. func Func(key string, i interface{}) *Function { f := &Function{fv: reflect.ValueOf(i)} // Derive unique, somewhat stable key for this func. _, file, _, _ := runtime.Caller(1) f.key = file + ":" + key t := f.fv.Type() if t.Kind() != reflect.Func { f.err = errors.New("not a function") return f } if t.NumIn() == 0 || t.In(0) != contextType { f.err = errFirstArg return f } // Register the function's arguments with the gob package. // This is required because they are marshaled inside a []interface{}. // gob.Register only expects to be called during initialization; // that's fine because this function expects the same. for i := 0; i < t.NumIn(); i++ { // Only concrete types may be registered. If the argument has // interface type, the client is resposible for registering the // concrete types it will hold. if t.In(i).Kind() == reflect.Interface { continue } gob.Register(reflect.Zero(t.In(i)).Interface()) } funcs[f.key] = f return f } type invocation struct { Key string Args []interface{} } // Call invokes a delayed function. // err := f.Call(c, ...) // is equivalent to // t, _ := f.Task(...) 
// err := taskqueue.Add(c, t, "") func (f *Function) Call(c context.Context, args ...interface{}) error { t, err := f.Task(args...) if err != nil { return err } _, err = taskqueueAdder(c, t, queue) return err } // Task creates a Task that will invoke the function. // Its parameters may be tweaked before adding it to a queue. // Users should not modify the Path or Payload fields of the returned Task. func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) { if f.err != nil { return nil, fmt.Errorf("delay: func is invalid: %v", f.err) } nArgs := len(args) + 1 // +1 for the context.Context ft := f.fv.Type() minArgs := ft.NumIn() if ft.IsVariadic() { minArgs-- } if nArgs < minArgs { return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs) } if !ft.IsVariadic() && nArgs > minArgs { return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs) } // Check arg types. for i := 1; i < nArgs; i++ { at := reflect.TypeOf(args[i-1]) var dt reflect.Type if i < minArgs { // not a variadic arg dt = ft.In(i) } else { // a variadic arg dt = ft.In(minArgs).Elem() } // nil arguments won't have a type, so they need special handling. if at == nil { // nil interface switch dt.Kind() { case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: continue // may be nil } return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt) } switch at.Kind() { case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: av := reflect.ValueOf(args[i-1]) if av.IsNil() { // nil value in interface; not supported by gob, so we replace it // with a nil interface value args[i-1] = nil } } if !at.AssignableTo(dt) { return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt) } } inv := invocation{ Key: f.key, Args: args, } buf := new(bytes.Buffer) if err := gob.NewEncoder(buf).Encode(inv); err != nil { return nil, fmt.Errorf("delay: gob encoding failed: %v", err) } return &taskqueue.Task{ Path: path, Payload: buf.Bytes(), }, nil } var taskqueueAdder = taskqueue.Add // for testing func init() { http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) { runFunc(appengine.NewContext(req), w, req) }) } func runFunc(c context.Context, w http.ResponseWriter, req *http.Request) { defer req.Body.Close() var inv invocation if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil { log.Errorf(c, "delay: failed decoding task payload: %v", err) log.Warningf(c, "delay: dropping task") return } f := funcs[inv.Key] if f == nil { log.Errorf(c, "delay: no func with key %q found", inv.Key) log.Warningf(c, "delay: dropping task") return } ft := f.fv.Type() in := []reflect.Value{reflect.ValueOf(c)} for _, arg := range inv.Args { var v reflect.Value if arg != nil { v = reflect.ValueOf(arg) } else { // Task was passed a nil argument, so we must construct // the zero value for the argument here. 
n := len(in) // we're constructing the nth argument var at reflect.Type if !ft.IsVariadic() || n < ft.NumIn()-1 { at = ft.In(n) } else { at = ft.In(ft.NumIn() - 1).Elem() } v = reflect.Zero(at) } in = append(in, v) } out := f.fv.Call(in) if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType { if errv := out[n-1]; !errv.IsNil() { log.Errorf(c, "delay: func failed (will retry): %v", errv.Interface()) w.WriteHeader(http.StatusInternalServerError) return } } } ================================================ FILE: vendor/google.golang.org/appengine/demos/guestbook/app.yaml ================================================ # Demo application for Managed VMs. runtime: go vm: true api_version: go1 manual_scaling: instances: 1 handlers: # Favicon. Without this, the browser hits this once per page view. - url: /favicon.ico static_files: favicon.ico upload: favicon.ico # Main app. All the real work is here. - url: /.* script: _go_app ================================================ FILE: vendor/google.golang.org/appengine/demos/guestbook/guestbook.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // This example only works on Managed VMs. // +build !appengine package main import ( "html/template" "net/http" "time" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/datastore" "google.golang.org/appengine/log" "google.golang.org/appengine/user" ) var initTime time.Time type Greeting struct { Author string Content string Date time.Time } func main() { http.HandleFunc("/", handleMainPage) http.HandleFunc("/sign", handleSign) appengine.Main() } // guestbookKey returns the key used for all guestbook entries. func guestbookKey(ctx context.Context) *datastore.Key { // The string "default_guestbook" here could be varied to have multiple guestbooks. 
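	// For example (an illustrative sketch only; this demo always uses the
	// single default book), a per-user guestbook could be keyed by the
	// signed-in user's email, where u is a hypothetical *user.User:
	//
	//	return datastore.NewKey(ctx, "Guestbook", u.Email, 0, nil)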
return datastore.NewKey(ctx, "Guestbook", "default_guestbook", 0, nil) } var tpl = template.Must(template.ParseGlob("templates/*.html")) func handleMainPage(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { http.Error(w, "GET requests only", http.StatusMethodNotAllowed) return } if r.URL.Path != "/" { http.NotFound(w, r) return } ctx := appengine.NewContext(r) tic := time.Now() q := datastore.NewQuery("Greeting").Ancestor(guestbookKey(ctx)).Order("-Date").Limit(10) var gg []*Greeting if _, err := q.GetAll(ctx, &gg); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) log.Errorf(ctx, "GetAll: %v", err) return } log.Infof(ctx, "Datastore lookup took %s", time.Since(tic).String()) log.Infof(ctx, "Rendering %d greetings", len(gg)) var email, logout, login string if u := user.Current(ctx); u != nil { logout, _ = user.LogoutURL(ctx, "/") email = u.Email } else { login, _ = user.LoginURL(ctx, "/") } data := struct { Greetings []*Greeting Login, Logout, Email string }{ Greetings: gg, Login: login, Logout: logout, Email: email, } w.Header().Set("Content-Type", "text/html; charset=utf-8") if err := tpl.ExecuteTemplate(w, "guestbook.html", data); err != nil { log.Errorf(ctx, "%v", err) } } func handleSign(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { http.Error(w, "POST requests only", http.StatusMethodNotAllowed) return } ctx := appengine.NewContext(r) g := &Greeting{ Content: r.FormValue("content"), Date: time.Now(), } if u := user.Current(ctx); u != nil { g.Author = u.String() } key := datastore.NewIncompleteKey(ctx, "Greeting", guestbookKey(ctx)) if _, err := datastore.Put(ctx, key, g); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } // Redirect with 303 which causes the subsequent request to use GET. http.Redirect(w, r, "/", http.StatusSeeOther) } ================================================ FILE: vendor/google.golang.org/appengine/demos/guestbook/index.yaml ================================================ indexes: - kind: Greeting ancestor: yes properties: - name: Date direction: desc ================================================ FILE: vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html ================================================ Guestbook Demo

{{with .Email}}You are currently logged in as {{.}}.{{end}} {{with .Login}}Sign in{{end}} {{with .Logout}}Sign out{{end}}

{{range .Greetings }}

{{with .Author}}{{.}}{{else}}An anonymous person{{end}} on {{.Date.Format "3:04pm, Mon 2 Jan"}} wrote

{{.Content}}

{{end}}
================================================ FILE: vendor/google.golang.org/appengine/demos/helloworld/app.yaml ================================================ runtime: go api_version: go1 vm: true manual_scaling: instances: 1 handlers: - url: /favicon.ico static_files: favicon.ico upload: favicon.ico - url: /.* script: _go_app ================================================ FILE: vendor/google.golang.org/appengine/demos/helloworld/helloworld.go ================================================ // Copyright 2014 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // This example only works on Managed VMs. // +build !appengine package main import ( "html/template" "net/http" "time" "google.golang.org/appengine" "google.golang.org/appengine/log" ) var initTime = time.Now() func main() { http.HandleFunc("/", handle) appengine.Main() } func handle(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/" { http.NotFound(w, r) return } ctx := appengine.NewContext(r) log.Infof(ctx, "Serving the front page.") tmpl.Execute(w, time.Since(initTime)) } var tmpl = template.Must(template.New("front").Parse(`

Hello, World! 세상아 안녕!

This instance has been running for {{.}}.

`)) ================================================ FILE: vendor/google.golang.org/appengine/errors.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // This file provides error functions for common API failure modes. package appengine import ( "fmt" "google.golang.org/appengine/internal" ) // IsOverQuota reports whether err represents an API call failure // due to insufficient available quota. func IsOverQuota(err error) bool { callErr, ok := err.(*internal.CallError) return ok && callErr.Code == 4 } // MultiError is returned by batch operations when there are errors with // particular elements. Errors will be in a one-to-one correspondence with // the input elements; successful elements will have a nil entry. type MultiError []error func (m MultiError) Error() string { s, n := "", 0 for _, e := range m { if e != nil { if n == 0 { s = e.Error() } n++ } } switch n { case 0: return "(0 errors)" case 1: return s case 2: return s + " (and 1 other error)" } return fmt.Sprintf("%s (and %d other errors)", s, n-1) } ================================================ FILE: vendor/google.golang.org/appengine/file/file.go ================================================ // Copyright 2014 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package file provides helper functions for using Google Cloud Storage. package file import ( "fmt" "golang.org/x/net/context" "google.golang.org/appengine/internal" aipb "google.golang.org/appengine/internal/app_identity" ) // DefaultBucketName returns the name of this application's // default Google Cloud Storage bucket. func DefaultBucketName(c context.Context) (string, error) { req := &aipb.GetDefaultGcsBucketNameRequest{} res := &aipb.GetDefaultGcsBucketNameResponse{} err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res) if err != nil { return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res) } return res.GetDefaultGcsBucketName(), nil } ================================================ FILE: vendor/google.golang.org/appengine/identity.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package appengine import ( "time" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" ) // AppID returns the application ID for the current application. // The string will be a plain application ID (e.g. "appid"), with a // domain prefix for custom domain deployments (e.g. "example.com:appid"). func AppID(c context.Context) string { return internal.AppID(c) } // DefaultVersionHostname returns the standard hostname of the default version // of the current application (e.g. "my-app.appspot.com"). This is suitable for // use in constructing URLs. func DefaultVersionHostname(c context.Context) string { return internal.DefaultVersionHostname(c) } // ModuleName returns the module name of the current instance. func ModuleName(c context.Context) string { return internal.ModuleName(c) } // ModuleHostname returns a hostname of a module instance. 
// If module is the empty string, it refers to the module of the current instance. // If version is empty, it refers to the version of the current instance if valid, // or the default version of the module of the current instance. // If instance is empty, ModuleHostname returns the load-balancing hostname. func ModuleHostname(c context.Context, module, version, instance string) (string, error) { req := &modpb.GetHostnameRequest{} if module != "" { req.Module = &module } if version != "" { req.Version = &version } if instance != "" { req.Instance = &instance } res := &modpb.GetHostnameResponse{} if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { return "", err } return *res.Hostname, nil } // VersionID returns the version ID for the current application. // It will be of the form "X.Y", where X is specified in app.yaml, // and Y is a number generated when each version of the app is uploaded. // It does not include a module name. func VersionID(c context.Context) string { return internal.VersionID(c) } // InstanceID returns a mostly-unique identifier for this instance. func InstanceID() string { return internal.InstanceID() } // Datacenter returns an identifier for the datacenter that the instance is running in. func Datacenter(c context.Context) string { return internal.Datacenter(c) } // ServerSoftware returns the App Engine release version. // In production, it looks like "Google App Engine/X.Y.Z". // In the development appserver, it looks like "Development/X.Y". func ServerSoftware() string { return internal.ServerSoftware() } // RequestID returns a string that uniquely identifies the request. func RequestID(c context.Context) string { return internal.RequestID(c) } // AccessToken generates an OAuth2 access token for the specified scopes on // behalf of service account of this application. This token will expire after // the returned time. func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { req := &pb.GetAccessTokenRequest{Scope: scopes} res := &pb.GetAccessTokenResponse{} err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) if err != nil { return "", time.Time{}, err } return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil } // Certificate represents a public certificate for the app. type Certificate struct { KeyName string Data []byte // PEM-encoded X.509 certificate } // PublicCertificates retrieves the public certificates for the app. // They can be used to verify a signature returned by SignBytes. func PublicCertificates(c context.Context) ([]Certificate, error) { req := &pb.GetPublicCertificateForAppRequest{} res := &pb.GetPublicCertificateForAppResponse{} if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { return nil, err } var cs []Certificate for _, pc := range res.PublicCertificateList { cs = append(cs, Certificate{ KeyName: pc.GetKeyName(), Data: []byte(pc.GetX509CertificatePem()), }) } return cs, nil } // ServiceAccount returns a string representing the service account name, in // the form of an email address (typically app_id@appspot.gserviceaccount.com). 
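//
// Illustrative use (a sketch; error handling elided):
//
//	sa, err := appengine.ServiceAccount(ctx)
//	if err != nil {
//		// handle error
//	}
//	_ = sa // e.g. "my-app@appspot.gserviceaccount.com"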
func ServiceAccount(c context.Context) (string, error) { req := &pb.GetServiceAccountNameRequest{} res := &pb.GetServiceAccountNameResponse{} err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) if err != nil { return "", err } return res.GetServiceAccountName(), err } // SignBytes signs bytes using a private key unique to your application. func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { req := &pb.SignForAppRequest{BytesToSign: bytes} res := &pb.SignForAppResponse{} if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { return "", nil, err } return res.GetKeyName(), res.GetSignatureBytes(), nil } func init() { internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/image/image.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package image provides image services. package image import ( "fmt" "net/url" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/image" ) type ServingURLOptions struct { Secure bool // whether the URL should use HTTPS // Size must be between zero and 1600. // If Size is non-zero, a resized version of the image is served, // and Size is the served image's longest dimension. The aspect ratio is preserved. // If Crop is true the image is cropped from the center instead of being resized. Size int Crop bool } // ServingURL returns a URL that will serve an image from Blobstore. func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) { req := &pb.ImagesGetUrlBaseRequest{ BlobKey: (*string)(&key), } if opts != nil && opts.Secure { req.CreateSecureUrl = &opts.Secure } res := &pb.ImagesGetUrlBaseResponse{} if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil { return nil, err } // The URL may have suffixes added to dynamically resize or crop: // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio. // - adding "=s32-c" is the same as "=s32" except it will be cropped. u := *res.Url if opts != nil && opts.Size > 0 { u += fmt.Sprintf("=s%d", opts.Size) if opts.Crop { u += "-c" } } return url.Parse(u) } // DeleteServingURL deletes the serving URL for an image. func DeleteServingURL(c context.Context, key appengine.BlobKey) error { req := &pb.ImagesDeleteUrlBaseRequest{ BlobKey: (*string)(&key), } res := &pb.ImagesDeleteUrlBaseResponse{} return internal.Call(c, "images", "DeleteUrlBase", req, res) } func init() { internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/internal/aetesting/fake.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package aetesting provides utilities for testing App Engine packages. // This is not for testing user applications. 
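//
// A sketch of typical use in a test (illustrative; SomeRequest and
// SomeResponse stand in for the real protocol buffer types of the service
// being faked):
//
//	ctx := FakeSingleContext(t, "service", "Method",
//		func(in *pb.SomeRequest, out *pb.SomeResponse) error {
//			return nil
//		})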
package aetesting import ( "fmt" "reflect" "testing" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" ) // FakeSingleContext returns a context whose Call invocations will be serviced // by f, which should be a function that has two arguments of the input and output // protocol buffer type, and one error return. func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context { fv := reflect.ValueOf(f) if fv.Kind() != reflect.Func { t.Fatal("not a function") } ft := fv.Type() if ft.NumIn() != 2 || ft.NumOut() != 1 { t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut()) } for i := 0; i < 2; i++ { at := ft.In(i) if !at.Implements(protoMessageType) { t.Fatalf("arg %d does not implement proto.Message", i) } } if ft.Out(0) != errorType { t.Fatalf("f's return is %v, want error", ft.Out(0)) } s := &single{ t: t, service: service, method: method, f: fv, } return internal.WithCallOverride(context.Background(), s.call) } var ( protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() errorType = reflect.TypeOf((*error)(nil)).Elem() ) type single struct { t *testing.T service, method string f reflect.Value } func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error { if service == "__go__" { if method == "GetNamespace" { return nil // always yield an empty namespace } return fmt.Errorf("Unknown API call /%s.%s", service, method) } if service != s.service || method != s.method { s.t.Fatalf("Unexpected call to /%s.%s", service, method) } ins := []reflect.Value{ reflect.ValueOf(in), reflect.ValueOf(out), } outs := s.f.Call(ins) if outs[0].IsNil() { return nil } return outs[0].Interface().(error) } ================================================ FILE: vendor/google.golang.org/appengine/internal/api.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build !appengine package internal import ( "bytes" "errors" "fmt" "io/ioutil" "log" "net" "net/http" "net/url" "os" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/golang/protobuf/proto" netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" remotepb "google.golang.org/appengine/internal/remote_api" ) const ( apiPath = "/rpc_http" ) var ( // Incoming headers. ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") // Outgoing headers. 
apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") apiEndpointHeaderValue = []string{"app-engine-apis"} apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") apiContentType = http.CanonicalHeaderKey("Content-Type") apiContentTypeValue = []string{"application/octet-stream"} logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") apiHTTPClient = &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: limitDial, }, } ) func apiURL() *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } if p := os.Getenv("API_PORT"); p != "" { port = p } return &url.URL{ Scheme: "http", Host: host + ":" + port, Path: apiPath, } } func handleHTTP(w http.ResponseWriter, r *http.Request) { c := &context{ req: r, outHeader: w.Header(), apiURL: apiURL(), } stopFlushing := make(chan int) ctxs.Lock() ctxs.m[r] = c ctxs.Unlock() defer func() { ctxs.Lock() delete(ctxs.m, r) ctxs.Unlock() }() // Patch up RemoteAddr so it looks reasonable. if addr := r.Header.Get(userIPHeader); addr != "" { r.RemoteAddr = addr } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { r.RemoteAddr = addr } else { // Should not normally reach here, but pick a sensible default anyway. r.RemoteAddr = "127.0.0.1" } // The address in the headers will most likely be of these forms: // 123.123.123.123 // 2001:db8::1 // net/http.Request.RemoteAddr is specified to be in "IP:port" form. if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { // Assume the remote address is only a host; add a default port. r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") } // Start goroutine responsible for flushing app logs. // This is done after adding c to ctx.m (and stopped before removing it) // because flushing logs requires making an API call. go c.logFlusher(stopFlushing) executeRequestSafely(c, r) c.outHeader = nil // make sure header changes aren't respected any more stopFlushing <- 1 // any logging beyond this point will be dropped // Flush any pending logs asynchronously. c.pendingLogs.Lock() flushes := c.pendingLogs.flushes if len(c.pendingLogs.lines) > 0 { flushes++ } c.pendingLogs.Unlock() go c.flushLog(false) w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) // Avoid nil Write call if c.Write is never called. if c.outCode != 0 { w.WriteHeader(c.outCode) } if c.outBody != nil { w.Write(c.outBody) } } func executeRequestSafely(c *context, r *http.Request) { defer func() { if x := recover(); x != nil { logf(c, 4, "%s", renderPanic(x)) // 4 == critical c.outCode = 500 } }() http.DefaultServeMux.ServeHTTP(c, r) } func renderPanic(x interface{}) string { buf := make([]byte, 16<<10) // 16 KB should be plenty buf = buf[:runtime.Stack(buf, false)] // Remove the first few stack frames: // this func // the recover closure in the caller // That will root the stack trace at the site of the panic. const ( skipStart = "internal.renderPanic" skipFrames = 2 ) start := bytes.Index(buf, []byte(skipStart)) p := start for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 if p < 0 { break } } if p >= 0 { // buf[start:p+1] is the block to remove. // Copy buf[p+1:] over buf[start:] and shrink buf. copy(buf[start:], buf[p+1:]) buf = buf[:len(buf)-(p+1-start)] } // Add panic heading. 
head := fmt.Sprintf("panic: %v\n\n", x) if len(head) > len(buf) { // Extremely unlikely to happen. return head } copy(buf[len(head):], buf) copy(buf, head) return string(buf) } var ctxs = struct { sync.Mutex m map[*http.Request]*context bg *context // background context, lazily initialized // dec is used by tests to decorate the netcontext.Context returned // for a given request. This allows tests to add overrides (such as // WithAppIDOverride) to the context. The map is nil outside tests. dec map[*http.Request]func(netcontext.Context) netcontext.Context }{ m: make(map[*http.Request]*context), } // context represents the context of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. type context struct { req *http.Request outCode int outHeader http.Header outBody []byte pendingLogs struct { sync.Mutex lines []*logpb.UserAppLogLine flushes int } apiURL *url.URL } var contextKey = "holds a *context" func fromContext(ctx netcontext.Context) *context { c, _ := ctx.Value(&contextKey).(*context) return c } func withContext(parent netcontext.Context, c *context) netcontext.Context { ctx := netcontext.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } func toContext(c *context) netcontext.Context { return withContext(netcontext.Background(), c) } func IncomingHeaders(ctx netcontext.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { ctxs.Lock() c := ctxs.m[req] d := ctxs.dec[req] ctxs.Unlock() if d != nil { parent = d(parent) } if c == nil { // Someone passed in an http.Request that is not in-flight. // We panic here rather than panicking at a later point // so that stack traces will be more sensible. log.Panic("appengine: NewContext passed an unknown http.Request") } return withContext(parent, c) } func BackgroundContext() netcontext.Context { ctxs.Lock() defer ctxs.Unlock() if ctxs.bg != nil { return toContext(ctxs.bg) } // Compute background security ticket. appID := partitionlessAppID() escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) majVersion := VersionID(nil) if i := strings.Index(majVersion, "."); i > 0 { majVersion = majVersion[:i] } ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) ctxs.bg = &context{ req: &http.Request{ Header: http.Header{ ticketHeader: []string{ticket}, }, }, apiURL: apiURL(), } // TODO(dsymonds): Wire up the shutdown handler to do a final flush. go ctxs.bg.logFlusher(make(chan int)) return toContext(ctxs.bg) } // RegisterTestRequest registers the HTTP request req for testing, such that // any API calls are sent to the provided URL. It returns a closure to delete // the registration. // It should only be used by aetest package. 
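//
// Illustrative use (a sketch): the returned closure should be deferred so the
// registration is removed once the test request has been handled:
//
//	release := RegisterTestRequest(req, apiURL,
//		func(ctx netcontext.Context) netcontext.Context { return ctx })
//	defer release()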
func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {
	c := &context{
		req:    req,
		apiURL: apiURL,
	}
	ctxs.Lock()
	defer ctxs.Unlock()
	if _, ok := ctxs.m[req]; ok {
		log.Panic("req already associated with context")
	}
	if _, ok := ctxs.dec[req]; ok {
		log.Panic("req already associated with context")
	}
	if ctxs.dec == nil {
		ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
	}
	ctxs.m[req] = c
	ctxs.dec[req] = decorate

	return func() {
		ctxs.Lock()
		delete(ctxs.m, req)
		delete(ctxs.dec, req)
		ctxs.Unlock()
	}
}

var errTimeout = &CallError{
	Detail:  "Deadline exceeded",
	Code:    int32(remotepb.RpcError_CANCELLED),
	Timeout: true,
}

func (c *context) Header() http.Header { return c.outHeader }

// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
// codes do not permit a response body (nor response entity headers such as
// Content-Length, Content-Type, etc).
func bodyAllowedForStatus(status int) bool {
	switch {
	case status >= 100 && status <= 199:
		return false
	case status == 204:
		return false
	case status == 304:
		return false
	}
	return true
}

func (c *context) Write(b []byte) (int, error) {
	if c.outCode == 0 {
		c.WriteHeader(http.StatusOK)
	}
	if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
		return 0, http.ErrBodyNotAllowed
	}
	c.outBody = append(c.outBody, b...)
	return len(b), nil
}

func (c *context) WriteHeader(code int) {
	if c.outCode != 0 {
		logf(c, 3, "WriteHeader called multiple times on request.") // error level
		return
	}
	c.outCode = code
}

func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
	hreq := &http.Request{
		Method: "POST",
		URL:    c.apiURL,
		Header: http.Header{
			apiEndpointHeader: apiEndpointHeaderValue,
			apiMethodHeader:   apiMethodHeaderValue,
			apiContentType:    apiContentTypeValue,
			apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
		},
		Body:          ioutil.NopCloser(bytes.NewReader(body)),
		ContentLength: int64(len(body)),
		Host:          c.apiURL.Host,
	}
	if info := c.req.Header.Get(dapperHeader); info != "" {
		hreq.Header.Set(dapperHeader, info)
	}
	if info := c.req.Header.Get(traceHeader); info != "" {
		hreq.Header.Set(traceHeader, info)
	}

	tr := apiHTTPClient.Transport.(*http.Transport)

	var timedOut int32 // atomic; set to 1 if timed out
	t := time.AfterFunc(timeout, func() {
		atomic.StoreInt32(&timedOut, 1)
		tr.CancelRequest(hreq)
	})
	defer t.Stop()
	defer func() {
		// Check if timeout was exceeded.
		if atomic.LoadInt32(&timedOut) != 0 {
			err = errTimeout
		}
	}()

	hresp, err := apiHTTPClient.Do(hreq)
	if err != nil {
		return nil, &CallError{
			Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	defer hresp.Body.Close()
	hrespBody, err := ioutil.ReadAll(hresp.Body)
	if hresp.StatusCode != 200 {
		return nil, &CallError{
			Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	if err != nil {
		return nil, &CallError{
			Detail: fmt.Sprintf("service bridge response bad: %v", err),
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	return hrespBody, nil
}

func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	if f, ctx, ok := callOverrideFromContext(ctx); ok {
		return f(ctx, service, method, in, out)
	}

	// Handle already-done contexts quickly.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	c := fromContext(ctx)
	if c == nil {
		// Give a good error message rather than a panic lower down.
		return errors.New("not an App Engine context")
	}

	// Apply transaction modifications if we're in a transaction.
	if t := transactionFromContext(ctx); t != nil {
		if t.finished {
			return errors.New("transaction context has expired")
		}
		applyTransaction(in, &t.transaction)
	}

	// Default RPC timeout is 60s.
	timeout := 60 * time.Second
	if deadline, ok := ctx.Deadline(); ok {
		timeout = deadline.Sub(time.Now())
	}

	data, err := proto.Marshal(in)
	if err != nil {
		return err
	}

	ticket := c.req.Header.Get(ticketHeader)
	req := &remotepb.Request{
		ServiceName: &service,
		Method:      &method,
		Request:     data,
		RequestId:   &ticket,
	}
	hreqBody, err := proto.Marshal(req)
	if err != nil {
		return err
	}

	hrespBody, err := c.post(hreqBody, timeout)
	if err != nil {
		return err
	}

	res := &remotepb.Response{}
	if err := proto.Unmarshal(hrespBody, res); err != nil {
		return err
	}
	if res.RpcError != nil {
		ce := &CallError{
			Detail: res.RpcError.GetDetail(),
			Code:   *res.RpcError.Code,
		}
		switch remotepb.RpcError_ErrorCode(ce.Code) {
		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
			ce.Timeout = true
		}
		return ce
	}
	if res.ApplicationError != nil {
		return &APIError{
			Service: *req.ServiceName,
			Detail:  res.ApplicationError.GetDetail(),
			Code:    *res.ApplicationError.Code,
		}
	}
	if res.Exception != nil || res.JavaException != nil {
		// This shouldn't happen, but let's be defensive.
		return &CallError{
			Detail: "service bridge returned exception",
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	return proto.Unmarshal(res.Response, out)
}

func (c *context) Request() *http.Request {
	return c.req
}

func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
	// Truncate long log lines.
	// TODO(dsymonds): Check if this is still necessary.
	const lim = 8 << 10
	if len(*ll.Message) > lim {
		suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
		ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
	}

	c.pendingLogs.Lock()
	c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
	c.pendingLogs.Unlock()
}

var logLevelName = map[int64]string{
	0: "DEBUG",
	1: "INFO",
	2: "WARNING",
	3: "ERROR",
	4: "CRITICAL",
}

func logf(c *context, level int64, format string, args ...interface{}) {
	s := fmt.Sprintf(format, args...)
	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
	c.addLogLine(&logpb.UserAppLogLine{
		TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
		Level:         &level,
		Message:       &s,
	})
	log.Print(logLevelName[level] + ": " + s)
}

// flushLog attempts to flush any pending logs to the appserver.
// It should not be called concurrently.
func (c *context) flushLog(force bool) (flushed bool) {
	c.pendingLogs.Lock()
	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
	n, rem := 0, 30<<20
	for ; n < len(c.pendingLogs.lines); n++ {
		ll := c.pendingLogs.lines[n]
		// Each log line will require about 3 bytes of overhead.
		nb := proto.Size(ll) + 3
		if nb > rem {
			break
		}
		rem -= nb
	}
	lines := c.pendingLogs.lines[:n]
	c.pendingLogs.lines = c.pendingLogs.lines[n:]
	c.pendingLogs.Unlock()

	if len(lines) == 0 && !force {
		// Nothing to flush.
		return false
	}

	rescueLogs := false
	defer func() {
		if rescueLogs {
			c.pendingLogs.Lock()
			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
			c.pendingLogs.Unlock()
		}
	}()

	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
		LogLine: lines,
	})
	if err != nil {
		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
		rescueLogs = true
		return false
	}

	req := &logpb.FlushRequest{
		Logs: buf,
	}
	res := &basepb.VoidProto{}
	c.pendingLogs.Lock()
	c.pendingLogs.flushes++
	c.pendingLogs.Unlock()
	if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
		log.Printf("internal.flushLog: Flush RPC: %v", err)
		rescueLogs = true
		return false
	}
	return true
}

const (
	// Log flushing parameters.
	flushInterval      = 1 * time.Second
	forceFlushInterval = 60 * time.Second
)

func (c *context) logFlusher(stop <-chan int) {
	lastFlush := time.Now()
	tick := time.NewTicker(flushInterval)
	for {
		select {
		case <-stop:
			// Request finished.
			tick.Stop()
			return
		case <-tick.C:
			force := time.Now().Sub(lastFlush) > forceFlushInterval
			if c.flushLog(force) {
				lastFlush = time.Now()
			}
		}
	}
}

func ContextForTesting(req *http.Request) netcontext.Context {
	return toContext(&context{req: req})
}

================================================
FILE: vendor/google.golang.org/appengine/internal/api_classic.go
================================================
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appengine

package internal

import (
	"errors"
	"net/http"
	"time"

	"appengine"
	"appengine_internal"
	basepb "appengine_internal/base"

	"github.com/golang/protobuf/proto"
	netcontext "golang.org/x/net/context"
)

var contextKey = "holds an appengine.Context"

func fromContext(ctx netcontext.Context) appengine.Context {
	c, _ := ctx.Value(&contextKey).(appengine.Context)
	return c
}

// This is only for classic App Engine adapters.
func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
	return fromContext(ctx)
}

func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
	ctx := netcontext.WithValue(parent, &contextKey, c)

	s := &basepb.StringProto{}
	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
	if ns := s.GetValue(); ns != "" {
		ctx = NamespacedContext(ctx, ns)
	}

	return ctx
}

func IncomingHeaders(ctx netcontext.Context) http.Header {
	if c := fromContext(ctx); c != nil {
		if req, ok := c.Request().(*http.Request); ok {
			return req.Header
		}
	}
	return nil
}

func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
	c := appengine.NewContext(req)
	return withContext(parent, c)
}

func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	if f, ctx, ok := callOverrideFromContext(ctx); ok {
		return f(ctx, service, method, in, out)
	}

	// Handle already-done contexts quickly.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	c := fromContext(ctx)
	if c == nil {
		// Give a good error message rather than a panic lower down.
		return errors.New("not an App Engine context")
	}

	// Apply transaction modifications if we're in a transaction.
	if t := transactionFromContext(ctx); t != nil {
		if t.finished {
			return errors.New("transaction context has expired")
		}
		applyTransaction(in, &t.transaction)
	}

	var opts *appengine_internal.CallOptions
	if d, ok := ctx.Deadline(); ok {
		opts = &appengine_internal.CallOptions{
			Timeout: d.Sub(time.Now()),
		}
	}

	err := c.Call(service, method, in, out, opts)
	switch v := err.(type) {
	case *appengine_internal.APIError:
		return &APIError{
			Service: v.Service,
			Detail:  v.Detail,
			Code:    v.Code,
		}
	case *appengine_internal.CallError:
		return &CallError{
			Detail:  v.Detail,
			Code:    v.Code,
			Timeout: v.Timeout,
		}
	}
	return err
}

func handleHTTP(w http.ResponseWriter, r *http.Request) {
	panic("handleHTTP called; this should be impossible")
}

func logf(c appengine.Context, level int64, format string, args ...interface{}) {
	var fn func(format string, args ...interface{})
	switch level {
	case 0:
		fn = c.Debugf
	case 1:
		fn = c.Infof
	case 2:
		fn = c.Warningf
	case 3:
		fn = c.Errorf
	case 4:
		fn = c.Criticalf
	default:
		// This shouldn't happen.
		fn = c.Criticalf
	}
	fn(format, args...)
}

================================================
FILE: vendor/google.golang.org/appengine/internal/api_common.go
================================================
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

import (
	"github.com/golang/protobuf/proto"
	netcontext "golang.org/x/net/context"
)

type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error

var callOverrideKey = "holds []CallOverrideFunc"

func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
	// We avoid appending to any existing call override
	// so we don't risk overwriting a popped stack below.
	var cofs []CallOverrideFunc
	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
		cofs = append(cofs, uf...)
	}
	cofs = append(cofs, f)
	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}

func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
	if len(cofs) == 0 {
		return nil, nil, false
	}
	// We found a list of overrides; grab the last, and reconstitute a
	// context that will hide it.
	f := cofs[len(cofs)-1]
	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
	return f, ctx, true
}

type logOverrideFunc func(level int64, format string, args ...interface{})

var logOverrideKey = "holds a logOverrideFunc"

func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
	return netcontext.WithValue(ctx, &logOverrideKey, f)
}

var appIDOverrideKey = "holds a string, being the full app ID"

func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
	return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
}

var namespaceKey = "holds the namespace string"

func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
	return netcontext.WithValue(ctx, &namespaceKey, ns)
}

func NamespaceFromContext(ctx netcontext.Context) string {
	// If there's no namespace, return the empty string.
	ns, _ := ctx.Value(&namespaceKey).(string)
	return ns
}

// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
func FullyQualifiedAppID(ctx netcontext.Context) string {
	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
		return id
	}
	return fullyQualifiedAppID(ctx)
}

func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
		f(level, format, args...)
		return
	}
	logf(fromContext(ctx), level, format, args...)
}

// NamespacedContext wraps a Context to support namespaces.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
	n := &namespacedContext{
		namespace: namespace,
	}
	return withNamespace(WithCallOverride(ctx, n.call), namespace)
}

type namespacedContext struct {
	namespace string
}

func (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	// Apply any namespace mods.
	if mod, ok := NamespaceMods[service]; ok {
		mod(in, n.namespace)
	}
	return Call(ctx, service, method, in, out)
}

================================================
FILE: vendor/google.golang.org/appengine/internal/app_id.go
================================================
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

import (
	"strings"
)

func parseFullAppID(appid string) (partition, domain, displayID string) {
	if i := strings.Index(appid, "~"); i != -1 {
		partition, appid = appid[:i], appid[i+1:]
	}
	if i := strings.Index(appid, ":"); i != -1 {
		domain, appid = appid[:i], appid[i+1:]
	}
	return partition, domain, appid
}

// appID returns "appid" or "domain.com:appid".
func appID(fullAppID string) string {
	_, dom, dis := parseFullAppID(fullAppID)
	if dom != "" {
		return dom + ":" + dis
	}
	return dis
}

================================================
FILE: vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
================================================
// Code generated by protoc-gen-go.
// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
// DO NOT EDIT!

/*
Package app_identity is a generated protocol buffer package.

It is generated from these files:
	google.golang.org/appengine/internal/app_identity/app_identity_service.proto

It has these top-level messages:
	AppIdentityServiceError
	SignForAppRequest
	SignForAppResponse
	GetPublicCertificateForAppRequest
	PublicCertificate
	GetPublicCertificateForAppResponse
	GetServiceAccountNameRequest
	GetServiceAccountNameResponse
	GetAccessTokenRequest
	GetAccessTokenResponse
	GetDefaultGcsBucketNameRequest
	GetDefaultGcsBucketNameResponse
*/
package app_identity

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
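//
// Editorial sketch (not part of the generated file): code in the main
// appengine package drives these request/response messages through the Call
// plumbing defined in internal/api.go and internal/api_classic.go. Roughly,
// and with the service and method strings below taken as assumptions rather
// than from this file:
//
//	req := &GetAccessTokenRequest{Scope: []string{"https://www.googleapis.com/auth/devstorage.read_only"}}
//	res := &GetAccessTokenResponse{}
//	if err := internal.Call(ctx, "app_identity_service", "GetAccessToken", req, res); err != nil {
//		// handle the RPC error
//	}
//	token, expiry := res.GetAccessToken(), res.GetExpirationTime()
//	_, _ = token, expiry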
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type AppIdentityServiceError_ErrorCode int32 const ( AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 ) var AppIdentityServiceError_ErrorCode_name = map[int32]string{ 0: "SUCCESS", 9: "UNKNOWN_SCOPE", 1000: "BLOB_TOO_LARGE", 1001: "DEADLINE_EXCEEDED", 1002: "NOT_A_VALID_APP", 1003: "UNKNOWN_ERROR", 1005: "NOT_ALLOWED", 1006: "NOT_IMPLEMENTED", } var AppIdentityServiceError_ErrorCode_value = map[string]int32{ "SUCCESS": 0, "UNKNOWN_SCOPE": 9, "BLOB_TOO_LARGE": 1000, "DEADLINE_EXCEEDED": 1001, "NOT_A_VALID_APP": 1002, "UNKNOWN_ERROR": 1003, "NOT_ALLOWED": 1005, "NOT_IMPLEMENTED": 1006, } func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { p := new(AppIdentityServiceError_ErrorCode) *p = x return p } func (x AppIdentityServiceError_ErrorCode) String() string { return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) } func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") if err != nil { return err } *x = AppIdentityServiceError_ErrorCode(value) return nil } type AppIdentityServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } func (*AppIdentityServiceError) ProtoMessage() {} type SignForAppRequest struct { BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } func (*SignForAppRequest) ProtoMessage() {} func (m *SignForAppRequest) GetBytesToSign() []byte { if m != nil { return m.BytesToSign } return nil } type SignForAppResponse struct { KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } func (*SignForAppResponse) ProtoMessage() {} func (m *SignForAppResponse) GetKeyName() string { if m != nil && m.KeyName != nil { return *m.KeyName } return "" } func (m *SignForAppResponse) GetSignatureBytes() []byte { if m != nil { return m.SignatureBytes } return nil } type GetPublicCertificateForAppRequest struct { XXX_unrecognized []byte `json:"-"` } func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } func (*GetPublicCertificateForAppRequest) 
ProtoMessage() {} type PublicCertificate struct { KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } func (*PublicCertificate) ProtoMessage() {} func (m *PublicCertificate) GetKeyName() string { if m != nil && m.KeyName != nil { return *m.KeyName } return "" } func (m *PublicCertificate) GetX509CertificatePem() string { if m != nil && m.X509CertificatePem != nil { return *m.X509CertificatePem } return "" } type GetPublicCertificateForAppResponse struct { PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } func (*GetPublicCertificateForAppResponse) ProtoMessage() {} func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { if m != nil { return m.PublicCertificateList } return nil } func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { if m != nil && m.MaxClientCacheTimeInSecond != nil { return *m.MaxClientCacheTimeInSecond } return 0 } type GetServiceAccountNameRequest struct { XXX_unrecognized []byte `json:"-"` } func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } func (*GetServiceAccountNameRequest) ProtoMessage() {} type GetServiceAccountNameResponse struct { ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } func (*GetServiceAccountNameResponse) ProtoMessage() {} func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { if m != nil && m.ServiceAccountName != nil { return *m.ServiceAccountName } return "" } type GetAccessTokenRequest struct { Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } func (*GetAccessTokenRequest) ProtoMessage() {} func (m *GetAccessTokenRequest) GetScope() []string { if m != nil { return m.Scope } return nil } func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { if m != nil && m.ServiceAccountId != nil { return *m.ServiceAccountId } return 0 } func (m *GetAccessTokenRequest) GetServiceAccountName() string { if m != nil && m.ServiceAccountName != nil 
{ return *m.ServiceAccountName } return "" } type GetAccessTokenResponse struct { AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } func (*GetAccessTokenResponse) ProtoMessage() {} func (m *GetAccessTokenResponse) GetAccessToken() string { if m != nil && m.AccessToken != nil { return *m.AccessToken } return "" } func (m *GetAccessTokenResponse) GetExpirationTime() int64 { if m != nil && m.ExpirationTime != nil { return *m.ExpirationTime } return 0 } type GetDefaultGcsBucketNameRequest struct { XXX_unrecognized []byte `json:"-"` } func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} type GetDefaultGcsBucketNameResponse struct { DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { if m != nil && m.DefaultGcsBucketName != nil { return *m.DefaultGcsBucketName } return "" } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto ================================================ syntax = "proto2"; option go_package = "app_identity"; package appengine; message AppIdentityServiceError { enum ErrorCode { SUCCESS = 0; UNKNOWN_SCOPE = 9; BLOB_TOO_LARGE = 1000; DEADLINE_EXCEEDED = 1001; NOT_A_VALID_APP = 1002; UNKNOWN_ERROR = 1003; NOT_ALLOWED = 1005; NOT_IMPLEMENTED = 1006; } } message SignForAppRequest { optional bytes bytes_to_sign = 1; } message SignForAppResponse { optional string key_name = 1; optional bytes signature_bytes = 2; } message GetPublicCertificateForAppRequest { } message PublicCertificate { optional string key_name = 1; optional string x509_certificate_pem = 2; } message GetPublicCertificateForAppResponse { repeated PublicCertificate public_certificate_list = 1; optional int64 max_client_cache_time_in_second = 2; } message GetServiceAccountNameRequest { } message GetServiceAccountNameResponse { optional string service_account_name = 1; } message GetAccessTokenRequest { repeated string scope = 1; optional int64 service_account_id = 2; optional string service_account_name = 3; } message GetAccessTokenResponse { optional string access_token = 1; optional int64 expiration_time = 2; } message GetDefaultGcsBucketNameRequest { } message GetDefaultGcsBucketNameResponse { optional string default_gcs_bucket_name = 1; } ================================================ FILE: vendor/google.golang.org/appengine/internal/base/api_base.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/base/api_base.proto // DO NOT EDIT! 
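//
// Editorial note (not part of the generated file): these one-field wrapper
// messages carry bare primitive values over internal RPCs. For example,
// api_classic.go earlier in this vendor tree resolves the current namespace
// with exactly this pattern:
//
//	s := &basepb.StringProto{}
//	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
//	ns := s.GetValue()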
/* Package base is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/base/api_base.proto It has these top-level messages: StringProto Integer32Proto Integer64Proto BoolProto DoubleProto BytesProto VoidProto */ package base import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type StringProto struct { Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StringProto) Reset() { *m = StringProto{} } func (m *StringProto) String() string { return proto.CompactTextString(m) } func (*StringProto) ProtoMessage() {} func (m *StringProto) GetValue() string { if m != nil && m.Value != nil { return *m.Value } return "" } type Integer32Proto struct { Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } func (*Integer32Proto) ProtoMessage() {} func (m *Integer32Proto) GetValue() int32 { if m != nil && m.Value != nil { return *m.Value } return 0 } type Integer64Proto struct { Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } func (*Integer64Proto) ProtoMessage() {} func (m *Integer64Proto) GetValue() int64 { if m != nil && m.Value != nil { return *m.Value } return 0 } type BoolProto struct { Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BoolProto) Reset() { *m = BoolProto{} } func (m *BoolProto) String() string { return proto.CompactTextString(m) } func (*BoolProto) ProtoMessage() {} func (m *BoolProto) GetValue() bool { if m != nil && m.Value != nil { return *m.Value } return false } type DoubleProto struct { Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DoubleProto) Reset() { *m = DoubleProto{} } func (m *DoubleProto) String() string { return proto.CompactTextString(m) } func (*DoubleProto) ProtoMessage() {} func (m *DoubleProto) GetValue() float64 { if m != nil && m.Value != nil { return *m.Value } return 0 } type BytesProto struct { Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BytesProto) Reset() { *m = BytesProto{} } func (m *BytesProto) String() string { return proto.CompactTextString(m) } func (*BytesProto) ProtoMessage() {} func (m *BytesProto) GetValue() []byte { if m != nil { return m.Value } return nil } type VoidProto struct { XXX_unrecognized []byte `json:"-"` } func (m *VoidProto) Reset() { *m = VoidProto{} } func (m *VoidProto) String() string { return proto.CompactTextString(m) } func (*VoidProto) ProtoMessage() {} ================================================ FILE: vendor/google.golang.org/appengine/internal/base/api_base.proto ================================================ // Built-in base types for API calls. Primarily useful as return types. 
syntax = "proto2"; option go_package = "base"; package appengine.base; message StringProto { required string value = 1; } message Integer32Proto { required int32 value = 1; } message Integer64Proto { required int64 value = 1; } message BoolProto { required bool value = 1; } message DoubleProto { required double value = 1; } message BytesProto { required bytes value = 1 [ctype=CORD]; } message VoidProto { } ================================================ FILE: vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto // DO NOT EDIT! /* Package blobstore is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/blobstore/blobstore_service.proto It has these top-level messages: BlobstoreServiceError CreateUploadURLRequest CreateUploadURLResponse DeleteBlobRequest FetchDataRequest FetchDataResponse CloneBlobRequest CloneBlobResponse DecodeBlobKeyRequest DecodeBlobKeyResponse CreateEncodedGoogleStorageKeyRequest CreateEncodedGoogleStorageKeyResponse */ package blobstore import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type BlobstoreServiceError_ErrorCode int32 const ( BlobstoreServiceError_OK BlobstoreServiceError_ErrorCode = 0 BlobstoreServiceError_INTERNAL_ERROR BlobstoreServiceError_ErrorCode = 1 BlobstoreServiceError_URL_TOO_LONG BlobstoreServiceError_ErrorCode = 2 BlobstoreServiceError_PERMISSION_DENIED BlobstoreServiceError_ErrorCode = 3 BlobstoreServiceError_BLOB_NOT_FOUND BlobstoreServiceError_ErrorCode = 4 BlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 5 BlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6 BlobstoreServiceError_ARGUMENT_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 8 BlobstoreServiceError_INVALID_BLOB_KEY BlobstoreServiceError_ErrorCode = 9 ) var BlobstoreServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INTERNAL_ERROR", 2: "URL_TOO_LONG", 3: "PERMISSION_DENIED", 4: "BLOB_NOT_FOUND", 5: "DATA_INDEX_OUT_OF_RANGE", 6: "BLOB_FETCH_SIZE_TOO_LARGE", 8: "ARGUMENT_OUT_OF_RANGE", 9: "INVALID_BLOB_KEY", } var BlobstoreServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INTERNAL_ERROR": 1, "URL_TOO_LONG": 2, "PERMISSION_DENIED": 3, "BLOB_NOT_FOUND": 4, "DATA_INDEX_OUT_OF_RANGE": 5, "BLOB_FETCH_SIZE_TOO_LARGE": 6, "ARGUMENT_OUT_OF_RANGE": 8, "INVALID_BLOB_KEY": 9, } func (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode { p := new(BlobstoreServiceError_ErrorCode) *p = x return p } func (x BlobstoreServiceError_ErrorCode) String() string { return proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x)) } func (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, "BlobstoreServiceError_ErrorCode") if err != nil { return err } *x = BlobstoreServiceError_ErrorCode(value) return nil } type BlobstoreServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *BlobstoreServiceError) Reset() { *m = BlobstoreServiceError{} } func (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) } func (*BlobstoreServiceError) ProtoMessage() {} type 
CreateUploadURLRequest struct { SuccessPath *string `protobuf:"bytes,1,req,name=success_path" json:"success_path,omitempty"` MaxUploadSizeBytes *int64 `protobuf:"varint,2,opt,name=max_upload_size_bytes" json:"max_upload_size_bytes,omitempty"` MaxUploadSizePerBlobBytes *int64 `protobuf:"varint,3,opt,name=max_upload_size_per_blob_bytes" json:"max_upload_size_per_blob_bytes,omitempty"` GsBucketName *string `protobuf:"bytes,4,opt,name=gs_bucket_name" json:"gs_bucket_name,omitempty"` UrlExpiryTimeSeconds *int32 `protobuf:"varint,5,opt,name=url_expiry_time_seconds" json:"url_expiry_time_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateUploadURLRequest) Reset() { *m = CreateUploadURLRequest{} } func (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) } func (*CreateUploadURLRequest) ProtoMessage() {} func (m *CreateUploadURLRequest) GetSuccessPath() string { if m != nil && m.SuccessPath != nil { return *m.SuccessPath } return "" } func (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 { if m != nil && m.MaxUploadSizeBytes != nil { return *m.MaxUploadSizeBytes } return 0 } func (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 { if m != nil && m.MaxUploadSizePerBlobBytes != nil { return *m.MaxUploadSizePerBlobBytes } return 0 } func (m *CreateUploadURLRequest) GetGsBucketName() string { if m != nil && m.GsBucketName != nil { return *m.GsBucketName } return "" } func (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 { if m != nil && m.UrlExpiryTimeSeconds != nil { return *m.UrlExpiryTimeSeconds } return 0 } type CreateUploadURLResponse struct { Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateUploadURLResponse) Reset() { *m = CreateUploadURLResponse{} } func (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) } func (*CreateUploadURLResponse) ProtoMessage() {} func (m *CreateUploadURLResponse) GetUrl() string { if m != nil && m.Url != nil { return *m.Url } return "" } type DeleteBlobRequest struct { BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteBlobRequest) Reset() { *m = DeleteBlobRequest{} } func (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) } func (*DeleteBlobRequest) ProtoMessage() {} func (m *DeleteBlobRequest) GetBlobKey() []string { if m != nil { return m.BlobKey } return nil } func (m *DeleteBlobRequest) GetToken() string { if m != nil && m.Token != nil { return *m.Token } return "" } type FetchDataRequest struct { BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` StartIndex *int64 `protobuf:"varint,2,req,name=start_index" json:"start_index,omitempty"` EndIndex *int64 `protobuf:"varint,3,req,name=end_index" json:"end_index,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} } func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) } func (*FetchDataRequest) ProtoMessage() {} func (m *FetchDataRequest) GetBlobKey() string { if m != nil && m.BlobKey != nil { return *m.BlobKey } return "" } func (m *FetchDataRequest) GetStartIndex() int64 { if m != nil && m.StartIndex != nil { return *m.StartIndex } return 0 } func (m *FetchDataRequest) GetEndIndex() int64 { if m != nil && m.EndIndex != nil { return *m.EndIndex } 
return 0 } type FetchDataResponse struct { Data []byte `protobuf:"bytes,1000,req,name=data" json:"data,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} } func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) } func (*FetchDataResponse) ProtoMessage() {} func (m *FetchDataResponse) GetData() []byte { if m != nil { return m.Data } return nil } type CloneBlobRequest struct { BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` MimeType []byte `protobuf:"bytes,2,req,name=mime_type" json:"mime_type,omitempty"` TargetAppId []byte `protobuf:"bytes,3,req,name=target_app_id" json:"target_app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CloneBlobRequest) Reset() { *m = CloneBlobRequest{} } func (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) } func (*CloneBlobRequest) ProtoMessage() {} func (m *CloneBlobRequest) GetBlobKey() []byte { if m != nil { return m.BlobKey } return nil } func (m *CloneBlobRequest) GetMimeType() []byte { if m != nil { return m.MimeType } return nil } func (m *CloneBlobRequest) GetTargetAppId() []byte { if m != nil { return m.TargetAppId } return nil } type CloneBlobResponse struct { BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CloneBlobResponse) Reset() { *m = CloneBlobResponse{} } func (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) } func (*CloneBlobResponse) ProtoMessage() {} func (m *CloneBlobResponse) GetBlobKey() []byte { if m != nil { return m.BlobKey } return nil } type DecodeBlobKeyRequest struct { BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DecodeBlobKeyRequest) Reset() { *m = DecodeBlobKeyRequest{} } func (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) } func (*DecodeBlobKeyRequest) ProtoMessage() {} func (m *DecodeBlobKeyRequest) GetBlobKey() []string { if m != nil { return m.BlobKey } return nil } type DecodeBlobKeyResponse struct { Decoded []string `protobuf:"bytes,1,rep,name=decoded" json:"decoded,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DecodeBlobKeyResponse) Reset() { *m = DecodeBlobKeyResponse{} } func (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) } func (*DecodeBlobKeyResponse) ProtoMessage() {} func (m *DecodeBlobKeyResponse) GetDecoded() []string { if m != nil { return m.Decoded } return nil } type CreateEncodedGoogleStorageKeyRequest struct { Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateEncodedGoogleStorageKeyRequest) Reset() { *m = CreateEncodedGoogleStorageKeyRequest{} } func (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) } func (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage() {} func (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string { if m != nil && m.Filename != nil { return *m.Filename } return "" } type CreateEncodedGoogleStorageKeyResponse struct { BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateEncodedGoogleStorageKeyResponse) Reset() { *m = CreateEncodedGoogleStorageKeyResponse{} } func (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) } func 
(*CreateEncodedGoogleStorageKeyResponse) ProtoMessage() {} func (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string { if m != nil && m.BlobKey != nil { return *m.BlobKey } return "" } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto ================================================ syntax = "proto2"; option go_package = "blobstore"; package appengine; message BlobstoreServiceError { enum ErrorCode { OK = 0; INTERNAL_ERROR = 1; URL_TOO_LONG = 2; PERMISSION_DENIED = 3; BLOB_NOT_FOUND = 4; DATA_INDEX_OUT_OF_RANGE = 5; BLOB_FETCH_SIZE_TOO_LARGE = 6; ARGUMENT_OUT_OF_RANGE = 8; INVALID_BLOB_KEY = 9; } } message CreateUploadURLRequest { required string success_path = 1; optional int64 max_upload_size_bytes = 2; optional int64 max_upload_size_per_blob_bytes = 3; optional string gs_bucket_name = 4; optional int32 url_expiry_time_seconds = 5; } message CreateUploadURLResponse { required string url = 1; } message DeleteBlobRequest { repeated string blob_key = 1; optional string token = 2; } message FetchDataRequest { required string blob_key = 1; required int64 start_index = 2; required int64 end_index = 3; } message FetchDataResponse { required bytes data = 1000 [ctype = CORD]; } message CloneBlobRequest { required bytes blob_key = 1; required bytes mime_type = 2; required bytes target_app_id = 3; } message CloneBlobResponse { required bytes blob_key = 1; } message DecodeBlobKeyRequest { repeated string blob_key = 1; } message DecodeBlobKeyResponse { repeated string decoded = 1; } message CreateEncodedGoogleStorageKeyRequest { required string filename = 1; } message CreateEncodedGoogleStorageKeyResponse { required string blob_key = 1; } ================================================ FILE: vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/capability/capability_service.proto // DO NOT EDIT! /* Package channel is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/capability/capability_service.proto It has these top-level messages: IsEnabledRequest IsEnabledResponse */ package channel import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type IsEnabledResponse_SummaryStatus int32 const ( IsEnabledResponse_DEFAULT IsEnabledResponse_SummaryStatus = 0 IsEnabledResponse_ENABLED IsEnabledResponse_SummaryStatus = 1 IsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2 IsEnabledResponse_SCHEDULED_NOW IsEnabledResponse_SummaryStatus = 3 IsEnabledResponse_DISABLED IsEnabledResponse_SummaryStatus = 4 IsEnabledResponse_UNKNOWN IsEnabledResponse_SummaryStatus = 5 ) var IsEnabledResponse_SummaryStatus_name = map[int32]string{ 0: "DEFAULT", 1: "ENABLED", 2: "SCHEDULED_FUTURE", 3: "SCHEDULED_NOW", 4: "DISABLED", 5: "UNKNOWN", } var IsEnabledResponse_SummaryStatus_value = map[string]int32{ "DEFAULT": 0, "ENABLED": 1, "SCHEDULED_FUTURE": 2, "SCHEDULED_NOW": 3, "DISABLED": 4, "UNKNOWN": 5, } func (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus { p := new(IsEnabledResponse_SummaryStatus) *p = x return p } func (x IsEnabledResponse_SummaryStatus) String() string { return proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x)) } func (x *IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, "IsEnabledResponse_SummaryStatus") if err != nil { return err } *x = IsEnabledResponse_SummaryStatus(value) return nil } type IsEnabledRequest struct { Package *string `protobuf:"bytes,1,req,name=package" json:"package,omitempty"` Capability []string `protobuf:"bytes,2,rep,name=capability" json:"capability,omitempty"` Call []string `protobuf:"bytes,3,rep,name=call" json:"call,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IsEnabledRequest) Reset() { *m = IsEnabledRequest{} } func (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) } func (*IsEnabledRequest) ProtoMessage() {} func (m *IsEnabledRequest) GetPackage() string { if m != nil && m.Package != nil { return *m.Package } return "" } func (m *IsEnabledRequest) GetCapability() []string { if m != nil { return m.Capability } return nil } func (m *IsEnabledRequest) GetCall() []string { if m != nil { return m.Call } return nil } type IsEnabledResponse struct { SummaryStatus *IsEnabledResponse_SummaryStatus `protobuf:"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus" json:"summary_status,omitempty"` TimeUntilScheduled *int64 `protobuf:"varint,2,opt,name=time_until_scheduled" json:"time_until_scheduled,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IsEnabledResponse) Reset() { *m = IsEnabledResponse{} } func (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) } func (*IsEnabledResponse) ProtoMessage() {} func (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus { if m != nil && m.SummaryStatus != nil { return *m.SummaryStatus } return IsEnabledResponse_DEFAULT } func (m *IsEnabledResponse) GetTimeUntilScheduled() int64 { if m != nil && m.TimeUntilScheduled != nil { return *m.TimeUntilScheduled } return 0 } ================================================ FILE: vendor/google.golang.org/appengine/internal/capability/capability_service.proto ================================================ syntax = "proto2"; option go_package = "channel"; package appengine; message IsEnabledRequest { required string package = 1; repeated string capability = 2; repeated string call = 3; } message IsEnabledResponse { enum SummaryStatus { DEFAULT = 0; ENABLED = 1; SCHEDULED_FUTURE = 2; SCHEDULED_NOW = 3; DISABLED 
= 4; UNKNOWN = 5; } optional SummaryStatus summary_status = 1; optional int64 time_until_scheduled = 2; } service CapabilityService { rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {}; } ================================================ FILE: vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/channel/channel_service.proto // DO NOT EDIT! /* Package channel is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/channel/channel_service.proto It has these top-level messages: ChannelServiceError CreateChannelRequest CreateChannelResponse SendMessageRequest */ package channel import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type ChannelServiceError_ErrorCode int32 const ( ChannelServiceError_OK ChannelServiceError_ErrorCode = 0 ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1 ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2 ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3 ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4 ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5 ) var ChannelServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INTERNAL_ERROR", 2: "INVALID_CHANNEL_KEY", 3: "BAD_MESSAGE", 4: "INVALID_CHANNEL_TOKEN_DURATION", 5: "APPID_ALIAS_REQUIRED", } var ChannelServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INTERNAL_ERROR": 1, "INVALID_CHANNEL_KEY": 2, "BAD_MESSAGE": 3, "INVALID_CHANNEL_TOKEN_DURATION": 4, "APPID_ALIAS_REQUIRED": 5, } func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode { p := new(ChannelServiceError_ErrorCode) *p = x return p } func (x ChannelServiceError_ErrorCode) String() string { return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x)) } func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode") if err != nil { return err } *x = ChannelServiceError_ErrorCode(value) return nil } type ChannelServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} } func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) } func (*ChannelServiceError) ProtoMessage() {} type CreateChannelRequest struct { ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" json:"duration_minutes,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} } func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) } func (*CreateChannelRequest) ProtoMessage() {} func (m *CreateChannelRequest) GetApplicationKey() string { if m != nil && m.ApplicationKey != nil { return *m.ApplicationKey } return "" } func (m *CreateChannelRequest) GetDurationMinutes() int32 { if m != nil && m.DurationMinutes != nil { return *m.DurationMinutes } return 0 } type CreateChannelResponse struct { Token *string `protobuf:"bytes,2,opt,name=token" 
json:"token,omitempty"` DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} } func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) } func (*CreateChannelResponse) ProtoMessage() {} func (m *CreateChannelResponse) GetToken() string { if m != nil && m.Token != nil { return *m.Token } return "" } func (m *CreateChannelResponse) GetDurationMinutes() int32 { if m != nil && m.DurationMinutes != nil { return *m.DurationMinutes } return 0 } type SendMessageRequest struct { ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} } func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) } func (*SendMessageRequest) ProtoMessage() {} func (m *SendMessageRequest) GetApplicationKey() string { if m != nil && m.ApplicationKey != nil { return *m.ApplicationKey } return "" } func (m *SendMessageRequest) GetMessage() string { if m != nil && m.Message != nil { return *m.Message } return "" } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/channel/channel_service.proto ================================================ syntax = "proto2"; option go_package = "channel"; package appengine; message ChannelServiceError { enum ErrorCode { OK = 0; INTERNAL_ERROR = 1; INVALID_CHANNEL_KEY = 2; BAD_MESSAGE = 3; INVALID_CHANNEL_TOKEN_DURATION = 4; APPID_ALIAS_REQUIRED = 5; } } message CreateChannelRequest { required string application_key = 1; optional int32 duration_minutes = 2; } message CreateChannelResponse { optional string token = 2; optional int32 duration_minutes = 3; } message SendMessageRequest { required string application_key = 1; required string message = 2; } ================================================ FILE: vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/datastore/datastore_v3.proto // DO NOT EDIT! /* Package datastore is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/datastore/datastore_v3.proto It has these top-level messages: Action PropertyValue Property Path Reference User EntityProto CompositeProperty Index CompositeIndex IndexPostfix IndexPosition Snapshot InternalHeader Transaction Query CompiledQuery CompiledCursor Cursor Error Cost GetRequest GetResponse PutRequest PutResponse TouchRequest TouchResponse DeleteRequest DeleteResponse NextRequest QueryResult AllocateIdsRequest AllocateIdsResponse CompositeIndices AddActionsRequest AddActionsResponse BeginTransactionRequest CommitResponse */ package datastore import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type Property_Meaning int32 const ( Property_NO_MEANING Property_Meaning = 0 Property_BLOB Property_Meaning = 14 Property_TEXT Property_Meaning = 15 Property_BYTESTRING Property_Meaning = 16 Property_ATOM_CATEGORY Property_Meaning = 1 Property_ATOM_LINK Property_Meaning = 2 Property_ATOM_TITLE Property_Meaning = 3 Property_ATOM_CONTENT Property_Meaning = 4 Property_ATOM_SUMMARY Property_Meaning = 5 Property_ATOM_AUTHOR Property_Meaning = 6 Property_GD_WHEN Property_Meaning = 7 Property_GD_EMAIL Property_Meaning = 8 Property_GEORSS_POINT Property_Meaning = 9 Property_GD_IM Property_Meaning = 10 Property_GD_PHONENUMBER Property_Meaning = 11 Property_GD_POSTALADDRESS Property_Meaning = 12 Property_GD_RATING Property_Meaning = 13 Property_BLOBKEY Property_Meaning = 17 Property_ENTITY_PROTO Property_Meaning = 19 Property_INDEX_VALUE Property_Meaning = 18 ) var Property_Meaning_name = map[int32]string{ 0: "NO_MEANING", 14: "BLOB", 15: "TEXT", 16: "BYTESTRING", 1: "ATOM_CATEGORY", 2: "ATOM_LINK", 3: "ATOM_TITLE", 4: "ATOM_CONTENT", 5: "ATOM_SUMMARY", 6: "ATOM_AUTHOR", 7: "GD_WHEN", 8: "GD_EMAIL", 9: "GEORSS_POINT", 10: "GD_IM", 11: "GD_PHONENUMBER", 12: "GD_POSTALADDRESS", 13: "GD_RATING", 17: "BLOBKEY", 19: "ENTITY_PROTO", 18: "INDEX_VALUE", } var Property_Meaning_value = map[string]int32{ "NO_MEANING": 0, "BLOB": 14, "TEXT": 15, "BYTESTRING": 16, "ATOM_CATEGORY": 1, "ATOM_LINK": 2, "ATOM_TITLE": 3, "ATOM_CONTENT": 4, "ATOM_SUMMARY": 5, "ATOM_AUTHOR": 6, "GD_WHEN": 7, "GD_EMAIL": 8, "GEORSS_POINT": 9, "GD_IM": 10, "GD_PHONENUMBER": 11, "GD_POSTALADDRESS": 12, "GD_RATING": 13, "BLOBKEY": 17, "ENTITY_PROTO": 19, "INDEX_VALUE": 18, } func (x Property_Meaning) Enum() *Property_Meaning { p := new(Property_Meaning) *p = x return p } func (x Property_Meaning) String() string { return proto.EnumName(Property_Meaning_name, int32(x)) } func (x *Property_Meaning) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") if err != nil { return err } *x = Property_Meaning(value) return nil } type Property_FtsTokenizationOption int32 const ( Property_HTML Property_FtsTokenizationOption = 1 Property_ATOM Property_FtsTokenizationOption = 2 ) var Property_FtsTokenizationOption_name = map[int32]string{ 1: "HTML", 2: "ATOM", } var Property_FtsTokenizationOption_value = map[string]int32{ "HTML": 1, "ATOM": 2, } func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { p := new(Property_FtsTokenizationOption) *p = x return p } func (x Property_FtsTokenizationOption) String() string { return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) } func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") if err != nil { return err } *x = Property_FtsTokenizationOption(value) return nil } type EntityProto_Kind int32 const ( EntityProto_GD_CONTACT EntityProto_Kind = 1 EntityProto_GD_EVENT EntityProto_Kind = 2 EntityProto_GD_MESSAGE EntityProto_Kind = 3 ) var EntityProto_Kind_name = map[int32]string{ 1: "GD_CONTACT", 2: "GD_EVENT", 3: "GD_MESSAGE", } var EntityProto_Kind_value = map[string]int32{ "GD_CONTACT": 1, "GD_EVENT": 2, "GD_MESSAGE": 3, } func (x EntityProto_Kind) Enum() *EntityProto_Kind { p := new(EntityProto_Kind) *p = x return p } func (x EntityProto_Kind) String() string { return proto.EnumName(EntityProto_Kind_name, 
int32(x)) } func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") if err != nil { return err } *x = EntityProto_Kind(value) return nil } type Index_Property_Direction int32 const ( Index_Property_ASCENDING Index_Property_Direction = 1 Index_Property_DESCENDING Index_Property_Direction = 2 ) var Index_Property_Direction_name = map[int32]string{ 1: "ASCENDING", 2: "DESCENDING", } var Index_Property_Direction_value = map[string]int32{ "ASCENDING": 1, "DESCENDING": 2, } func (x Index_Property_Direction) Enum() *Index_Property_Direction { p := new(Index_Property_Direction) *p = x return p } func (x Index_Property_Direction) String() string { return proto.EnumName(Index_Property_Direction_name, int32(x)) } func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") if err != nil { return err } *x = Index_Property_Direction(value) return nil } type CompositeIndex_State int32 const ( CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 CompositeIndex_READ_WRITE CompositeIndex_State = 2 CompositeIndex_DELETED CompositeIndex_State = 3 CompositeIndex_ERROR CompositeIndex_State = 4 ) var CompositeIndex_State_name = map[int32]string{ 1: "WRITE_ONLY", 2: "READ_WRITE", 3: "DELETED", 4: "ERROR", } var CompositeIndex_State_value = map[string]int32{ "WRITE_ONLY": 1, "READ_WRITE": 2, "DELETED": 3, "ERROR": 4, } func (x CompositeIndex_State) Enum() *CompositeIndex_State { p := new(CompositeIndex_State) *p = x return p } func (x CompositeIndex_State) String() string { return proto.EnumName(CompositeIndex_State_name, int32(x)) } func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") if err != nil { return err } *x = CompositeIndex_State(value) return nil } type Snapshot_Status int32 const ( Snapshot_INACTIVE Snapshot_Status = 0 Snapshot_ACTIVE Snapshot_Status = 1 ) var Snapshot_Status_name = map[int32]string{ 0: "INACTIVE", 1: "ACTIVE", } var Snapshot_Status_value = map[string]int32{ "INACTIVE": 0, "ACTIVE": 1, } func (x Snapshot_Status) Enum() *Snapshot_Status { p := new(Snapshot_Status) *p = x return p } func (x Snapshot_Status) String() string { return proto.EnumName(Snapshot_Status_name, int32(x)) } func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") if err != nil { return err } *x = Snapshot_Status(value) return nil } type Query_Hint int32 const ( Query_ORDER_FIRST Query_Hint = 1 Query_ANCESTOR_FIRST Query_Hint = 2 Query_FILTER_FIRST Query_Hint = 3 ) var Query_Hint_name = map[int32]string{ 1: "ORDER_FIRST", 2: "ANCESTOR_FIRST", 3: "FILTER_FIRST", } var Query_Hint_value = map[string]int32{ "ORDER_FIRST": 1, "ANCESTOR_FIRST": 2, "FILTER_FIRST": 3, } func (x Query_Hint) Enum() *Query_Hint { p := new(Query_Hint) *p = x return p } func (x Query_Hint) String() string { return proto.EnumName(Query_Hint_name, int32(x)) } func (x *Query_Hint) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") if err != nil { return err } *x = Query_Hint(value) return nil } type Query_Filter_Operator int32 const ( Query_Filter_LESS_THAN Query_Filter_Operator = 1 Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 Query_Filter_GREATER_THAN Query_Filter_Operator = 3 
Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 Query_Filter_EQUAL Query_Filter_Operator = 5 Query_Filter_IN Query_Filter_Operator = 6 Query_Filter_EXISTS Query_Filter_Operator = 7 ) var Query_Filter_Operator_name = map[int32]string{ 1: "LESS_THAN", 2: "LESS_THAN_OR_EQUAL", 3: "GREATER_THAN", 4: "GREATER_THAN_OR_EQUAL", 5: "EQUAL", 6: "IN", 7: "EXISTS", } var Query_Filter_Operator_value = map[string]int32{ "LESS_THAN": 1, "LESS_THAN_OR_EQUAL": 2, "GREATER_THAN": 3, "GREATER_THAN_OR_EQUAL": 4, "EQUAL": 5, "IN": 6, "EXISTS": 7, } func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { p := new(Query_Filter_Operator) *p = x return p } func (x Query_Filter_Operator) String() string { return proto.EnumName(Query_Filter_Operator_name, int32(x)) } func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") if err != nil { return err } *x = Query_Filter_Operator(value) return nil } type Query_Order_Direction int32 const ( Query_Order_ASCENDING Query_Order_Direction = 1 Query_Order_DESCENDING Query_Order_Direction = 2 ) var Query_Order_Direction_name = map[int32]string{ 1: "ASCENDING", 2: "DESCENDING", } var Query_Order_Direction_value = map[string]int32{ "ASCENDING": 1, "DESCENDING": 2, } func (x Query_Order_Direction) Enum() *Query_Order_Direction { p := new(Query_Order_Direction) *p = x return p } func (x Query_Order_Direction) String() string { return proto.EnumName(Query_Order_Direction_name, int32(x)) } func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") if err != nil { return err } *x = Query_Order_Direction(value) return nil } type Error_ErrorCode int32 const ( Error_BAD_REQUEST Error_ErrorCode = 1 Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 Error_INTERNAL_ERROR Error_ErrorCode = 3 Error_NEED_INDEX Error_ErrorCode = 4 Error_TIMEOUT Error_ErrorCode = 5 Error_PERMISSION_DENIED Error_ErrorCode = 6 Error_BIGTABLE_ERROR Error_ErrorCode = 7 Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 Error_CAPABILITY_DISABLED Error_ErrorCode = 9 Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 ) var Error_ErrorCode_name = map[int32]string{ 1: "BAD_REQUEST", 2: "CONCURRENT_TRANSACTION", 3: "INTERNAL_ERROR", 4: "NEED_INDEX", 5: "TIMEOUT", 6: "PERMISSION_DENIED", 7: "BIGTABLE_ERROR", 8: "COMMITTED_BUT_STILL_APPLYING", 9: "CAPABILITY_DISABLED", 10: "TRY_ALTERNATE_BACKEND", 11: "SAFE_TIME_TOO_OLD", } var Error_ErrorCode_value = map[string]int32{ "BAD_REQUEST": 1, "CONCURRENT_TRANSACTION": 2, "INTERNAL_ERROR": 3, "NEED_INDEX": 4, "TIMEOUT": 5, "PERMISSION_DENIED": 6, "BIGTABLE_ERROR": 7, "COMMITTED_BUT_STILL_APPLYING": 8, "CAPABILITY_DISABLED": 9, "TRY_ALTERNATE_BACKEND": 10, "SAFE_TIME_TOO_OLD": 11, } func (x Error_ErrorCode) Enum() *Error_ErrorCode { p := new(Error_ErrorCode) *p = x return p } func (x Error_ErrorCode) String() string { return proto.EnumName(Error_ErrorCode_name, int32(x)) } func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") if err != nil { return err } *x = Error_ErrorCode(value) return nil } type PutRequest_AutoIdPolicy int32 const ( PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 ) var PutRequest_AutoIdPolicy_name = map[int32]string{ 0: "CURRENT", 1: 
"SEQUENTIAL", } var PutRequest_AutoIdPolicy_value = map[string]int32{ "CURRENT": 0, "SEQUENTIAL": 1, } func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { p := new(PutRequest_AutoIdPolicy) *p = x return p } func (x PutRequest_AutoIdPolicy) String() string { return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) } func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") if err != nil { return err } *x = PutRequest_AutoIdPolicy(value) return nil } type Action struct { XXX_unrecognized []byte `json:"-"` } func (m *Action) Reset() { *m = Action{} } func (m *Action) String() string { return proto.CompactTextString(m) } func (*Action) ProtoMessage() {} type PropertyValue struct { Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyValue) Reset() { *m = PropertyValue{} } func (m *PropertyValue) String() string { return proto.CompactTextString(m) } func (*PropertyValue) ProtoMessage() {} func (m *PropertyValue) GetInt64Value() int64 { if m != nil && m.Int64Value != nil { return *m.Int64Value } return 0 } func (m *PropertyValue) GetBooleanValue() bool { if m != nil && m.BooleanValue != nil { return *m.BooleanValue } return false } func (m *PropertyValue) GetStringValue() string { if m != nil && m.StringValue != nil { return *m.StringValue } return "" } func (m *PropertyValue) GetDoubleValue() float64 { if m != nil && m.DoubleValue != nil { return *m.DoubleValue } return 0 } func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { if m != nil { return m.Pointvalue } return nil } func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { if m != nil { return m.Uservalue } return nil } func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { if m != nil { return m.Referencevalue } return nil } type PropertyValue_PointValue struct { X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } func (*PropertyValue_PointValue) ProtoMessage() {} func (m *PropertyValue_PointValue) GetX() float64 { if m != nil && m.X != nil { return *m.X } return 0 } func (m *PropertyValue_PointValue) GetY() float64 { if m != nil && m.Y != nil { return *m.Y } return 0 } type PropertyValue_UserValue struct { Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` FederatedIdentity *string 
`protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } func (*PropertyValue_UserValue) ProtoMessage() {} func (m *PropertyValue_UserValue) GetEmail() string { if m != nil && m.Email != nil { return *m.Email } return "" } func (m *PropertyValue_UserValue) GetAuthDomain() string { if m != nil && m.AuthDomain != nil { return *m.AuthDomain } return "" } func (m *PropertyValue_UserValue) GetNickname() string { if m != nil && m.Nickname != nil { return *m.Nickname } return "" } func (m *PropertyValue_UserValue) GetFederatedIdentity() string { if m != nil && m.FederatedIdentity != nil { return *m.FederatedIdentity } return "" } func (m *PropertyValue_UserValue) GetFederatedProvider() string { if m != nil && m.FederatedProvider != nil { return *m.FederatedProvider } return "" } type PropertyValue_ReferenceValue struct { App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } func (*PropertyValue_ReferenceValue) ProtoMessage() {} func (m *PropertyValue_ReferenceValue) GetApp() string { if m != nil && m.App != nil { return *m.App } return "" } func (m *PropertyValue_ReferenceValue) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { if m != nil { return m.Pathelement } return nil } type PropertyValue_ReferenceValue_PathElement struct { Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyValue_ReferenceValue_PathElement) Reset() { *m = PropertyValue_ReferenceValue_PathElement{} } func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { if m != nil && m.Type != nil { return *m.Type } return "" } func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { if m != nil && m.Id != nil { return *m.Id } return 0 } func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } type Property struct { Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` Multiple *bool `protobuf:"varint,4,req,name=multiple" 
json:"multiple,omitempty"` Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Property) Reset() { *m = Property{} } func (m *Property) String() string { return proto.CompactTextString(m) } func (*Property) ProtoMessage() {} const Default_Property_Meaning Property_Meaning = Property_NO_MEANING const Default_Property_Searchable bool = false const Default_Property_Locale string = "en" func (m *Property) GetMeaning() Property_Meaning { if m != nil && m.Meaning != nil { return *m.Meaning } return Default_Property_Meaning } func (m *Property) GetMeaningUri() string { if m != nil && m.MeaningUri != nil { return *m.MeaningUri } return "" } func (m *Property) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *Property) GetValue() *PropertyValue { if m != nil { return m.Value } return nil } func (m *Property) GetMultiple() bool { if m != nil && m.Multiple != nil { return *m.Multiple } return false } func (m *Property) GetSearchable() bool { if m != nil && m.Searchable != nil { return *m.Searchable } return Default_Property_Searchable } func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { if m != nil && m.FtsTokenizationOption != nil { return *m.FtsTokenizationOption } return Property_HTML } func (m *Property) GetLocale() string { if m != nil && m.Locale != nil { return *m.Locale } return Default_Property_Locale } type Path struct { Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Path) Reset() { *m = Path{} } func (m *Path) String() string { return proto.CompactTextString(m) } func (*Path) ProtoMessage() {} func (m *Path) GetElement() []*Path_Element { if m != nil { return m.Element } return nil } type Path_Element struct { Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Path_Element) Reset() { *m = Path_Element{} } func (m *Path_Element) String() string { return proto.CompactTextString(m) } func (*Path_Element) ProtoMessage() {} func (m *Path_Element) GetType() string { if m != nil && m.Type != nil { return *m.Type } return "" } func (m *Path_Element) GetId() int64 { if m != nil && m.Id != nil { return *m.Id } return 0 } func (m *Path_Element) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } type Reference struct { App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Reference) Reset() { *m = Reference{} } func (m *Reference) String() string { return proto.CompactTextString(m) } func (*Reference) ProtoMessage() {} func (m *Reference) GetApp() string { if m != nil && m.App != nil { return *m.App } return "" } func (m *Reference) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *Reference) GetPath() 
*Path { if m != nil { return m.Path } return nil } type User struct { Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *User) Reset() { *m = User{} } func (m *User) String() string { return proto.CompactTextString(m) } func (*User) ProtoMessage() {} func (m *User) GetEmail() string { if m != nil && m.Email != nil { return *m.Email } return "" } func (m *User) GetAuthDomain() string { if m != nil && m.AuthDomain != nil { return *m.AuthDomain } return "" } func (m *User) GetNickname() string { if m != nil && m.Nickname != nil { return *m.Nickname } return "" } func (m *User) GetFederatedIdentity() string { if m != nil && m.FederatedIdentity != nil { return *m.FederatedIdentity } return "" } func (m *User) GetFederatedProvider() string { if m != nil && m.FederatedProvider != nil { return *m.FederatedProvider } return "" } type EntityProto struct { Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *EntityProto) Reset() { *m = EntityProto{} } func (m *EntityProto) String() string { return proto.CompactTextString(m) } func (*EntityProto) ProtoMessage() {} func (m *EntityProto) GetKey() *Reference { if m != nil { return m.Key } return nil } func (m *EntityProto) GetEntityGroup() *Path { if m != nil { return m.EntityGroup } return nil } func (m *EntityProto) GetOwner() *User { if m != nil { return m.Owner } return nil } func (m *EntityProto) GetKind() EntityProto_Kind { if m != nil && m.Kind != nil { return *m.Kind } return EntityProto_GD_CONTACT } func (m *EntityProto) GetKindUri() string { if m != nil && m.KindUri != nil { return *m.KindUri } return "" } func (m *EntityProto) GetProperty() []*Property { if m != nil { return m.Property } return nil } func (m *EntityProto) GetRawProperty() []*Property { if m != nil { return m.RawProperty } return nil } func (m *EntityProto) GetRank() int32 { if m != nil && m.Rank != nil { return *m.Rank } return 0 } type CompositeProperty struct { IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } func (*CompositeProperty) ProtoMessage() {} func (m *CompositeProperty) GetIndexId() int64 { if m != nil && m.IndexId != nil { return *m.IndexId } return 0 } func (m 
*CompositeProperty) GetValue() []string { if m != nil { return m.Value } return nil } type Index struct { EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Index) Reset() { *m = Index{} } func (m *Index) String() string { return proto.CompactTextString(m) } func (*Index) ProtoMessage() {} func (m *Index) GetEntityType() string { if m != nil && m.EntityType != nil { return *m.EntityType } return "" } func (m *Index) GetAncestor() bool { if m != nil && m.Ancestor != nil { return *m.Ancestor } return false } func (m *Index) GetProperty() []*Index_Property { if m != nil { return m.Property } return nil } type Index_Property struct { Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Index_Property) Reset() { *m = Index_Property{} } func (m *Index_Property) String() string { return proto.CompactTextString(m) } func (*Index_Property) ProtoMessage() {} const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING func (m *Index_Property) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *Index_Property) GetDirection() Index_Property_Direction { if m != nil && m.Direction != nil { return *m.Direction } return Default_Index_Property_Direction } type CompositeIndex struct { AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } func (*CompositeIndex) ProtoMessage() {} const Default_CompositeIndex_OnlyUseIfRequired bool = false func (m *CompositeIndex) GetAppId() string { if m != nil && m.AppId != nil { return *m.AppId } return "" } func (m *CompositeIndex) GetId() int64 { if m != nil && m.Id != nil { return *m.Id } return 0 } func (m *CompositeIndex) GetDefinition() *Index { if m != nil { return m.Definition } return nil } func (m *CompositeIndex) GetState() CompositeIndex_State { if m != nil && m.State != nil { return *m.State } return CompositeIndex_WRITE_ONLY } func (m *CompositeIndex) GetOnlyUseIfRequired() bool { if m != nil && m.OnlyUseIfRequired != nil { return *m.OnlyUseIfRequired } return Default_CompositeIndex_OnlyUseIfRequired } type IndexPostfix struct { IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } func 
(*IndexPostfix) ProtoMessage() {} const Default_IndexPostfix_Before bool = true func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { if m != nil { return m.IndexValue } return nil } func (m *IndexPostfix) GetKey() *Reference { if m != nil { return m.Key } return nil } func (m *IndexPostfix) GetBefore() bool { if m != nil && m.Before != nil { return *m.Before } return Default_IndexPostfix_Before } type IndexPostfix_IndexValue struct { PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } func (*IndexPostfix_IndexValue) ProtoMessage() {} func (m *IndexPostfix_IndexValue) GetPropertyName() string { if m != nil && m.PropertyName != nil { return *m.PropertyName } return "" } func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { if m != nil { return m.Value } return nil } type IndexPosition struct { Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexPosition) Reset() { *m = IndexPosition{} } func (m *IndexPosition) String() string { return proto.CompactTextString(m) } func (*IndexPosition) ProtoMessage() {} const Default_IndexPosition_Before bool = true func (m *IndexPosition) GetKey() string { if m != nil && m.Key != nil { return *m.Key } return "" } func (m *IndexPosition) GetBefore() bool { if m != nil && m.Before != nil { return *m.Before } return Default_IndexPosition_Before } type Snapshot struct { Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (m *Snapshot) GetTs() int64 { if m != nil && m.Ts != nil { return *m.Ts } return 0 } type InternalHeader struct { Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *InternalHeader) Reset() { *m = InternalHeader{} } func (m *InternalHeader) String() string { return proto.CompactTextString(m) } func (*InternalHeader) ProtoMessage() {} func (m *InternalHeader) GetQos() string { if m != nil && m.Qos != nil { return *m.Qos } return "" } type Transaction struct { Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Transaction) Reset() { *m = Transaction{} } func (m *Transaction) String() string { return proto.CompactTextString(m) } func (*Transaction) ProtoMessage() {} const Default_Transaction_MarkChanges bool = false func (m *Transaction) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *Transaction) GetHandle() uint64 { if m != nil && m.Handle != nil { return *m.Handle } return 0 } func (m *Transaction) GetApp() string { if m != nil && m.App != nil { return *m.App } return "" } func (m *Transaction) GetMarkChanges() bool { if m != nil && 
m.MarkChanges != nil { return *m.MarkChanges } return Default_Transaction_MarkChanges } type Query struct { Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Query) Reset() { *m = Query{} } func (m *Query) String() string { return proto.CompactTextString(m) } func (*Query) ProtoMessage() {} const Default_Query_Offset int32 = 0 const Default_Query_RequirePerfectPlan bool = false const Default_Query_KeysOnly bool = false const Default_Query_Compile bool = false const Default_Query_PersistOffset bool = false func (m *Query) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *Query) GetApp() string { if m != nil && m.App != nil { return *m.App } return "" } func (m *Query) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *Query) GetKind() string { if m != nil && m.Kind != nil { return *m.Kind } return "" } func (m *Query) GetAncestor() *Reference { if m != nil { return m.Ancestor } return nil } func (m *Query) GetFilter() 
[]*Query_Filter { if m != nil { return m.Filter } return nil } func (m *Query) GetSearchQuery() string { if m != nil && m.SearchQuery != nil { return *m.SearchQuery } return "" } func (m *Query) GetOrder() []*Query_Order { if m != nil { return m.Order } return nil } func (m *Query) GetHint() Query_Hint { if m != nil && m.Hint != nil { return *m.Hint } return Query_ORDER_FIRST } func (m *Query) GetCount() int32 { if m != nil && m.Count != nil { return *m.Count } return 0 } func (m *Query) GetOffset() int32 { if m != nil && m.Offset != nil { return *m.Offset } return Default_Query_Offset } func (m *Query) GetLimit() int32 { if m != nil && m.Limit != nil { return *m.Limit } return 0 } func (m *Query) GetCompiledCursor() *CompiledCursor { if m != nil { return m.CompiledCursor } return nil } func (m *Query) GetEndCompiledCursor() *CompiledCursor { if m != nil { return m.EndCompiledCursor } return nil } func (m *Query) GetCompositeIndex() []*CompositeIndex { if m != nil { return m.CompositeIndex } return nil } func (m *Query) GetRequirePerfectPlan() bool { if m != nil && m.RequirePerfectPlan != nil { return *m.RequirePerfectPlan } return Default_Query_RequirePerfectPlan } func (m *Query) GetKeysOnly() bool { if m != nil && m.KeysOnly != nil { return *m.KeysOnly } return Default_Query_KeysOnly } func (m *Query) GetTransaction() *Transaction { if m != nil { return m.Transaction } return nil } func (m *Query) GetCompile() bool { if m != nil && m.Compile != nil { return *m.Compile } return Default_Query_Compile } func (m *Query) GetFailoverMs() int64 { if m != nil && m.FailoverMs != nil { return *m.FailoverMs } return 0 } func (m *Query) GetStrong() bool { if m != nil && m.Strong != nil { return *m.Strong } return false } func (m *Query) GetPropertyName() []string { if m != nil { return m.PropertyName } return nil } func (m *Query) GetGroupByPropertyName() []string { if m != nil { return m.GroupByPropertyName } return nil } func (m *Query) GetDistinct() bool { if m != nil && m.Distinct != nil { return *m.Distinct } return false } func (m *Query) GetMinSafeTimeSeconds() int64 { if m != nil && m.MinSafeTimeSeconds != nil { return *m.MinSafeTimeSeconds } return 0 } func (m *Query) GetSafeReplicaName() []string { if m != nil { return m.SafeReplicaName } return nil } func (m *Query) GetPersistOffset() bool { if m != nil && m.PersistOffset != nil { return *m.PersistOffset } return Default_Query_PersistOffset } type Query_Filter struct { Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Query_Filter) Reset() { *m = Query_Filter{} } func (m *Query_Filter) String() string { return proto.CompactTextString(m) } func (*Query_Filter) ProtoMessage() {} func (m *Query_Filter) GetOp() Query_Filter_Operator { if m != nil && m.Op != nil { return *m.Op } return Query_Filter_LESS_THAN } func (m *Query_Filter) GetProperty() []*Property { if m != nil { return m.Property } return nil } type Query_Order struct { Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Query_Order) Reset() { *m = Query_Order{} } func (m *Query_Order) String() string { return proto.CompactTextString(m) } func (*Query_Order) 
ProtoMessage() {} const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING func (m *Query_Order) GetProperty() string { if m != nil && m.Property != nil { return *m.Property } return "" } func (m *Query_Order) GetDirection() Query_Order_Direction { if m != nil && m.Direction != nil { return *m.Direction } return Default_Query_Order_Direction } type CompiledQuery struct { Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"` DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } func (*CompiledQuery) ProtoMessage() {} const Default_CompiledQuery_Offset int32 = 0 func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { if m != nil { return m.Primaryscan } return nil } func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { if m != nil { return m.Mergejoinscan } return nil } func (m *CompiledQuery) GetIndexDef() *Index { if m != nil { return m.IndexDef } return nil } func (m *CompiledQuery) GetOffset() int32 { if m != nil && m.Offset != nil { return *m.Offset } return Default_CompiledQuery_Offset } func (m *CompiledQuery) GetLimit() int32 { if m != nil && m.Limit != nil { return *m.Limit } return 0 } func (m *CompiledQuery) GetKeysOnly() bool { if m != nil && m.KeysOnly != nil { return *m.KeysOnly } return false } func (m *CompiledQuery) GetPropertyName() []string { if m != nil { return m.PropertyName } return nil } func (m *CompiledQuery) GetDistinctInfixSize() int32 { if m != nil && m.DistinctInfixSize != nil { return *m.DistinctInfixSize } return 0 } func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { if m != nil { return m.Entityfilter } return nil } type CompiledQuery_PrimaryScan struct { IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompiledQuery_PrimaryScan) Reset() { *m = 
CompiledQuery_PrimaryScan{} } func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } func (*CompiledQuery_PrimaryScan) ProtoMessage() {} func (m *CompiledQuery_PrimaryScan) GetIndexName() string { if m != nil && m.IndexName != nil { return *m.IndexName } return "" } func (m *CompiledQuery_PrimaryScan) GetStartKey() string { if m != nil && m.StartKey != nil { return *m.StartKey } return "" } func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { if m != nil && m.StartInclusive != nil { return *m.StartInclusive } return false } func (m *CompiledQuery_PrimaryScan) GetEndKey() string { if m != nil && m.EndKey != nil { return *m.EndKey } return "" } func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { if m != nil && m.EndInclusive != nil { return *m.EndInclusive } return false } func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { if m != nil { return m.StartPostfixValue } return nil } func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { if m != nil { return m.EndPostfixValue } return nil } func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { if m != nil && m.EndUnappliedLogTimestampUs != nil { return *m.EndUnappliedLogTimestampUs } return 0 } type CompiledQuery_MergeJoinScan struct { IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { if m != nil && m.IndexName != nil { return *m.IndexName } return "" } func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { if m != nil { return m.PrefixValue } return nil } func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { if m != nil && m.ValuePrefix != nil { return *m.ValuePrefix } return Default_CompiledQuery_MergeJoinScan_ValuePrefix } type CompiledQuery_EntityFilter struct { Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } func (*CompiledQuery_EntityFilter) ProtoMessage() {} const Default_CompiledQuery_EntityFilter_Distinct bool = false func (m *CompiledQuery_EntityFilter) GetDistinct() bool { if m != nil && m.Distinct != nil { return *m.Distinct } return Default_CompiledQuery_EntityFilter_Distinct } func (m *CompiledQuery_EntityFilter) GetKind() string { if m != nil && m.Kind != nil { return *m.Kind } return "" } func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { if m != nil { return m.Ancestor } return nil } type CompiledCursor struct { Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompiledCursor) Reset() 
{ *m = CompiledCursor{} } func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } func (*CompiledCursor) ProtoMessage() {} func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { if m != nil { return m.Position } return nil } type CompiledCursor_Position struct { StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } func (*CompiledCursor_Position) ProtoMessage() {} const Default_CompiledCursor_Position_StartInclusive bool = true func (m *CompiledCursor_Position) GetStartKey() string { if m != nil && m.StartKey != nil { return *m.StartKey } return "" } func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { if m != nil { return m.Indexvalue } return nil } func (m *CompiledCursor_Position) GetKey() *Reference { if m != nil { return m.Key } return nil } func (m *CompiledCursor_Position) GetStartInclusive() bool { if m != nil && m.StartInclusive != nil { return *m.StartInclusive } return Default_CompiledCursor_Position_StartInclusive } type CompiledCursor_Position_IndexValue struct { Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} func (m *CompiledCursor_Position_IndexValue) GetProperty() string { if m != nil && m.Property != nil { return *m.Property } return "" } func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { if m != nil { return m.Value } return nil } type Cursor struct { Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Cursor) Reset() { *m = Cursor{} } func (m *Cursor) String() string { return proto.CompactTextString(m) } func (*Cursor) ProtoMessage() {} func (m *Cursor) GetCursor() uint64 { if m != nil && m.Cursor != nil { return *m.Cursor } return 0 } func (m *Cursor) GetApp() string { if m != nil && m.App != nil { return *m.App } return "" } type Error struct { XXX_unrecognized []byte `json:"-"` } func (m *Error) Reset() { *m = Error{} } func (m *Error) String() string { return proto.CompactTextString(m) } func (*Error) ProtoMessage() {} type Cost struct { IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" 
json:"commitcost,omitempty"` ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Cost) Reset() { *m = Cost{} } func (m *Cost) String() string { return proto.CompactTextString(m) } func (*Cost) ProtoMessage() {} func (m *Cost) GetIndexWrites() int32 { if m != nil && m.IndexWrites != nil { return *m.IndexWrites } return 0 } func (m *Cost) GetIndexWriteBytes() int32 { if m != nil && m.IndexWriteBytes != nil { return *m.IndexWriteBytes } return 0 } func (m *Cost) GetEntityWrites() int32 { if m != nil && m.EntityWrites != nil { return *m.EntityWrites } return 0 } func (m *Cost) GetEntityWriteBytes() int32 { if m != nil && m.EntityWriteBytes != nil { return *m.EntityWriteBytes } return 0 } func (m *Cost) GetCommitcost() *Cost_CommitCost { if m != nil { return m.Commitcost } return nil } func (m *Cost) GetApproximateStorageDelta() int32 { if m != nil && m.ApproximateStorageDelta != nil { return *m.ApproximateStorageDelta } return 0 } func (m *Cost) GetIdSequenceUpdates() int32 { if m != nil && m.IdSequenceUpdates != nil { return *m.IdSequenceUpdates } return 0 } type Cost_CommitCost struct { RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } func (*Cost_CommitCost) ProtoMessage() {} func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { if m != nil && m.RequestedEntityPuts != nil { return *m.RequestedEntityPuts } return 0 } func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { if m != nil && m.RequestedEntityDeletes != nil { return *m.RequestedEntityDeletes } return 0 } type GetRequest struct { Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} const Default_GetRequest_AllowDeferred bool = false func (m *GetRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *GetRequest) GetKey() []*Reference { if m != nil { return m.Key } return nil } func (m *GetRequest) GetTransaction() *Transaction { if m != nil { return m.Transaction } return nil } func (m *GetRequest) GetFailoverMs() int64 { if m != nil && m.FailoverMs != nil { return *m.FailoverMs } return 0 } func (m *GetRequest) GetStrong() bool { if m != nil && m.Strong != nil { return *m.Strong } return false } func (m *GetRequest) GetAllowDeferred() bool { if m != nil && m.AllowDeferred != nil { return *m.AllowDeferred } return 
Default_GetRequest_AllowDeferred } type GetResponse struct { Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} const Default_GetResponse_InOrder bool = true func (m *GetResponse) GetEntity() []*GetResponse_Entity { if m != nil { return m.Entity } return nil } func (m *GetResponse) GetDeferred() []*Reference { if m != nil { return m.Deferred } return nil } func (m *GetResponse) GetInOrder() bool { if m != nil && m.InOrder != nil { return *m.InOrder } return Default_GetResponse_InOrder } type GetResponse_Entity struct { Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } func (*GetResponse_Entity) ProtoMessage() {} func (m *GetResponse_Entity) GetEntity() *EntityProto { if m != nil { return m.Entity } return nil } func (m *GetResponse_Entity) GetKey() *Reference { if m != nil { return m.Key } return nil } func (m *GetResponse_Entity) GetVersion() int64 { if m != nil && m.Version != nil { return *m.Version } return 0 } type PutRequest struct { Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PutRequest) Reset() { *m = PutRequest{} } func (m *PutRequest) String() string { return proto.CompactTextString(m) } func (*PutRequest) ProtoMessage() {} const Default_PutRequest_Trusted bool = false const Default_PutRequest_Force bool = false const Default_PutRequest_MarkChanges bool = false const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT func (m *PutRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *PutRequest) GetEntity() []*EntityProto { if m != nil { return m.Entity } return nil } func (m *PutRequest) GetTransaction() *Transaction { if m != nil { return m.Transaction } return nil } func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { if m != nil { return m.CompositeIndex } return nil } func (m *PutRequest) GetTrusted() bool { if m != nil && m.Trusted != nil { return *m.Trusted } return 
Default_PutRequest_Trusted } func (m *PutRequest) GetForce() bool { if m != nil && m.Force != nil { return *m.Force } return Default_PutRequest_Force } func (m *PutRequest) GetMarkChanges() bool { if m != nil && m.MarkChanges != nil { return *m.MarkChanges } return Default_PutRequest_MarkChanges } func (m *PutRequest) GetSnapshot() []*Snapshot { if m != nil { return m.Snapshot } return nil } func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { if m != nil && m.AutoIdPolicy != nil { return *m.AutoIdPolicy } return Default_PutRequest_AutoIdPolicy } type PutResponse struct { Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PutResponse) Reset() { *m = PutResponse{} } func (m *PutResponse) String() string { return proto.CompactTextString(m) } func (*PutResponse) ProtoMessage() {} func (m *PutResponse) GetKey() []*Reference { if m != nil { return m.Key } return nil } func (m *PutResponse) GetCost() *Cost { if m != nil { return m.Cost } return nil } func (m *PutResponse) GetVersion() []int64 { if m != nil { return m.Version } return nil } type TouchRequest struct { Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TouchRequest) Reset() { *m = TouchRequest{} } func (m *TouchRequest) String() string { return proto.CompactTextString(m) } func (*TouchRequest) ProtoMessage() {} const Default_TouchRequest_Force bool = false func (m *TouchRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *TouchRequest) GetKey() []*Reference { if m != nil { return m.Key } return nil } func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { if m != nil { return m.CompositeIndex } return nil } func (m *TouchRequest) GetForce() bool { if m != nil && m.Force != nil { return *m.Force } return Default_TouchRequest_Force } func (m *TouchRequest) GetSnapshot() []*Snapshot { if m != nil { return m.Snapshot } return nil } type TouchResponse struct { Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TouchResponse) Reset() { *m = TouchResponse{} } func (m *TouchResponse) String() string { return proto.CompactTextString(m) } func (*TouchResponse) ProtoMessage() {} func (m *TouchResponse) GetCost() *Cost { if m != nil { return m.Cost } return nil } type DeleteRequest struct { Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` 
XXX_unrecognized []byte `json:"-"` } func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} const Default_DeleteRequest_Trusted bool = false const Default_DeleteRequest_Force bool = false const Default_DeleteRequest_MarkChanges bool = false func (m *DeleteRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *DeleteRequest) GetKey() []*Reference { if m != nil { return m.Key } return nil } func (m *DeleteRequest) GetTransaction() *Transaction { if m != nil { return m.Transaction } return nil } func (m *DeleteRequest) GetTrusted() bool { if m != nil && m.Trusted != nil { return *m.Trusted } return Default_DeleteRequest_Trusted } func (m *DeleteRequest) GetForce() bool { if m != nil && m.Force != nil { return *m.Force } return Default_DeleteRequest_Force } func (m *DeleteRequest) GetMarkChanges() bool { if m != nil && m.MarkChanges != nil { return *m.MarkChanges } return Default_DeleteRequest_MarkChanges } func (m *DeleteRequest) GetSnapshot() []*Snapshot { if m != nil { return m.Snapshot } return nil } type DeleteResponse struct { Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } func (*DeleteResponse) ProtoMessage() {} func (m *DeleteResponse) GetCost() *Cost { if m != nil { return m.Cost } return nil } func (m *DeleteResponse) GetVersion() []int64 { if m != nil { return m.Version } return nil } type NextRequest struct { Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *NextRequest) Reset() { *m = NextRequest{} } func (m *NextRequest) String() string { return proto.CompactTextString(m) } func (*NextRequest) ProtoMessage() {} const Default_NextRequest_Offset int32 = 0 const Default_NextRequest_Compile bool = false func (m *NextRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *NextRequest) GetCursor() *Cursor { if m != nil { return m.Cursor } return nil } func (m *NextRequest) GetCount() int32 { if m != nil && m.Count != nil { return *m.Count } return 0 } func (m *NextRequest) GetOffset() int32 { if m != nil && m.Offset != nil { return *m.Offset } return Default_NextRequest_Offset } func (m *NextRequest) GetCompile() bool { if m != nil && m.Compile != nil { return *m.Compile } return Default_NextRequest_Compile } type QueryResult struct { Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" 
json:"index_only,omitempty"` SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *QueryResult) Reset() { *m = QueryResult{} } func (m *QueryResult) String() string { return proto.CompactTextString(m) } func (*QueryResult) ProtoMessage() {} func (m *QueryResult) GetCursor() *Cursor { if m != nil { return m.Cursor } return nil } func (m *QueryResult) GetResult() []*EntityProto { if m != nil { return m.Result } return nil } func (m *QueryResult) GetSkippedResults() int32 { if m != nil && m.SkippedResults != nil { return *m.SkippedResults } return 0 } func (m *QueryResult) GetMoreResults() bool { if m != nil && m.MoreResults != nil { return *m.MoreResults } return false } func (m *QueryResult) GetKeysOnly() bool { if m != nil && m.KeysOnly != nil { return *m.KeysOnly } return false } func (m *QueryResult) GetIndexOnly() bool { if m != nil && m.IndexOnly != nil { return *m.IndexOnly } return false } func (m *QueryResult) GetSmallOps() bool { if m != nil && m.SmallOps != nil { return *m.SmallOps } return false } func (m *QueryResult) GetCompiledQuery() *CompiledQuery { if m != nil { return m.CompiledQuery } return nil } func (m *QueryResult) GetCompiledCursor() *CompiledCursor { if m != nil { return m.CompiledCursor } return nil } func (m *QueryResult) GetIndex() []*CompositeIndex { if m != nil { return m.Index } return nil } func (m *QueryResult) GetVersion() []int64 { if m != nil { return m.Version } return nil } type AllocateIdsRequest struct { Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } func (*AllocateIdsRequest) ProtoMessage() {} func (m *AllocateIdsRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *AllocateIdsRequest) GetModelKey() *Reference { if m != nil { return m.ModelKey } return nil } func (m *AllocateIdsRequest) GetSize() int64 { if m != nil && m.Size != nil { return *m.Size } return 0 } func (m *AllocateIdsRequest) GetMax() int64 { if m != nil && m.Max != nil { return *m.Max } return 0 } func (m *AllocateIdsRequest) GetReserve() []*Reference { if m != nil { return m.Reserve } return nil } type AllocateIdsResponse struct { Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } func (*AllocateIdsResponse) ProtoMessage() {} func (m 
*AllocateIdsResponse) GetStart() int64 { if m != nil && m.Start != nil { return *m.Start } return 0 } func (m *AllocateIdsResponse) GetEnd() int64 { if m != nil && m.End != nil { return *m.End } return 0 } func (m *AllocateIdsResponse) GetCost() *Cost { if m != nil { return m.Cost } return nil } type CompositeIndices struct { Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } func (*CompositeIndices) ProtoMessage() {} func (m *CompositeIndices) GetIndex() []*CompositeIndex { if m != nil { return m.Index } return nil } type AddActionsRequest struct { Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } func (*AddActionsRequest) ProtoMessage() {} func (m *AddActionsRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *AddActionsRequest) GetTransaction() *Transaction { if m != nil { return m.Transaction } return nil } func (m *AddActionsRequest) GetAction() []*Action { if m != nil { return m.Action } return nil } type AddActionsResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } func (*AddActionsResponse) ProtoMessage() {} type BeginTransactionRequest struct { Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } func (*BeginTransactionRequest) ProtoMessage() {} const Default_BeginTransactionRequest_AllowMultipleEg bool = false func (m *BeginTransactionRequest) GetHeader() *InternalHeader { if m != nil { return m.Header } return nil } func (m *BeginTransactionRequest) GetApp() string { if m != nil && m.App != nil { return *m.App } return "" } func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { if m != nil && m.AllowMultipleEg != nil { return *m.AllowMultipleEg } return Default_BeginTransactionRequest_AllowMultipleEg } type CommitResponse struct { Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CommitResponse) Reset() { *m = CommitResponse{} } func (m *CommitResponse) String() string { return proto.CompactTextString(m) } func (*CommitResponse) ProtoMessage() {} func (m *CommitResponse) GetCost() *Cost { if m != nil { return m.Cost } return nil } func (m *CommitResponse) GetVersion() []*CommitResponse_Version { if m != nil { return m.Version } return nil } type CommitResponse_Version struct { RootEntityKey *Reference 
`protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } func (*CommitResponse_Version) ProtoMessage() {} func (m *CommitResponse_Version) GetRootEntityKey() *Reference { if m != nil { return m.RootEntityKey } return nil } func (m *CommitResponse_Version) GetVersion() int64 { if m != nil && m.Version != nil { return *m.Version } return 0 } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto ================================================ syntax = "proto2"; option go_package = "datastore"; package appengine; message Action{} message PropertyValue { optional int64 int64Value = 1; optional bool booleanValue = 2; optional string stringValue = 3; optional double doubleValue = 4; optional group PointValue = 5 { required double x = 6; required double y = 7; } optional group UserValue = 8 { required string email = 9; required string auth_domain = 10; optional string nickname = 11; optional string federated_identity = 21; optional string federated_provider = 22; } optional group ReferenceValue = 12 { required string app = 13; optional string name_space = 20; repeated group PathElement = 14 { required string type = 15; optional int64 id = 16; optional string name = 17; } } } message Property { enum Meaning { NO_MEANING = 0; BLOB = 14; TEXT = 15; BYTESTRING = 16; ATOM_CATEGORY = 1; ATOM_LINK = 2; ATOM_TITLE = 3; ATOM_CONTENT = 4; ATOM_SUMMARY = 5; ATOM_AUTHOR = 6; GD_WHEN = 7; GD_EMAIL = 8; GEORSS_POINT = 9; GD_IM = 10; GD_PHONENUMBER = 11; GD_POSTALADDRESS = 12; GD_RATING = 13; BLOBKEY = 17; ENTITY_PROTO = 19; INDEX_VALUE = 18; }; optional Meaning meaning = 1 [default = NO_MEANING]; optional string meaning_uri = 2; required string name = 3; required PropertyValue value = 5; required bool multiple = 4; optional bool searchable = 6 [default=false]; enum FtsTokenizationOption { HTML = 1; ATOM = 2; } optional FtsTokenizationOption fts_tokenization_option = 8; optional string locale = 9 [default = "en"]; } message Path { repeated group Element = 1 { required string type = 2; optional int64 id = 3; optional string name = 4; } } message Reference { required string app = 13; optional string name_space = 20; required Path path = 14; } message User { required string email = 1; required string auth_domain = 2; optional string nickname = 3; optional string federated_identity = 6; optional string federated_provider = 7; } message EntityProto { required Reference key = 13; required Path entity_group = 16; optional User owner = 17; enum Kind { GD_CONTACT = 1; GD_EVENT = 2; GD_MESSAGE = 3; } optional Kind kind = 4; optional string kind_uri = 5; repeated Property property = 14; repeated Property raw_property = 15; optional int32 rank = 18; } message CompositeProperty { required int64 index_id = 1; repeated string value = 2; } message Index { required string entity_type = 1; required bool ancestor = 5; repeated group Property = 2 { required string name = 3; enum Direction { ASCENDING = 1; DESCENDING = 2; } optional Direction direction = 4 [default = ASCENDING]; } } message CompositeIndex { required string app_id = 1; required int64 id = 2; required Index definition = 3; enum State { WRITE_ONLY = 1; READ_WRITE = 2; DELETED = 3; 
ERROR = 4; } required State state = 4; optional bool only_use_if_required = 6 [default = false]; } message IndexPostfix { message IndexValue { required string property_name = 1; required PropertyValue value = 2; } repeated IndexValue index_value = 1; optional Reference key = 2; optional bool before = 3 [default=true]; } message IndexPosition { optional string key = 1; optional bool before = 2 [default=true]; } message Snapshot { enum Status { INACTIVE = 0; ACTIVE = 1; } required int64 ts = 1; } message InternalHeader { optional string qos = 1; } message Transaction { optional InternalHeader header = 4; required fixed64 handle = 1; required string app = 2; optional bool mark_changes = 3 [default = false]; } message Query { optional InternalHeader header = 39; required string app = 1; optional string name_space = 29; optional string kind = 3; optional Reference ancestor = 17; repeated group Filter = 4 { enum Operator { LESS_THAN = 1; LESS_THAN_OR_EQUAL = 2; GREATER_THAN = 3; GREATER_THAN_OR_EQUAL = 4; EQUAL = 5; IN = 6; EXISTS = 7; } required Operator op = 6; repeated Property property = 14; } optional string search_query = 8; repeated group Order = 9 { enum Direction { ASCENDING = 1; DESCENDING = 2; } required string property = 10; optional Direction direction = 11 [default = ASCENDING]; } enum Hint { ORDER_FIRST = 1; ANCESTOR_FIRST = 2; FILTER_FIRST = 3; } optional Hint hint = 18; optional int32 count = 23; optional int32 offset = 12 [default = 0]; optional int32 limit = 16; optional CompiledCursor compiled_cursor = 30; optional CompiledCursor end_compiled_cursor = 31; repeated CompositeIndex composite_index = 19; optional bool require_perfect_plan = 20 [default = false]; optional bool keys_only = 21 [default = false]; optional Transaction transaction = 22; optional bool compile = 25 [default = false]; optional int64 failover_ms = 26; optional bool strong = 32; repeated string property_name = 33; repeated string group_by_property_name = 34; optional bool distinct = 24; optional int64 min_safe_time_seconds = 35; repeated string safe_replica_name = 36; optional bool persist_offset = 37 [default=false]; } message CompiledQuery { required group PrimaryScan = 1 { optional string index_name = 2; optional string start_key = 3; optional bool start_inclusive = 4; optional string end_key = 5; optional bool end_inclusive = 6; repeated string start_postfix_value = 22; repeated string end_postfix_value = 23; optional int64 end_unapplied_log_timestamp_us = 19; } repeated group MergeJoinScan = 7 { required string index_name = 8; repeated string prefix_value = 9; optional bool value_prefix = 20 [default=false]; } optional Index index_def = 21; optional int32 offset = 10 [default = 0]; optional int32 limit = 11; required bool keys_only = 12; repeated string property_name = 24; optional int32 distinct_infix_size = 25; optional group EntityFilter = 13 { optional bool distinct = 14 [default=false]; optional string kind = 17; optional Reference ancestor = 18; } } message CompiledCursor { optional group Position = 2 { optional string start_key = 27; repeated group IndexValue = 29 { optional string property = 30; required PropertyValue value = 31; } optional Reference key = 32; optional bool start_inclusive = 28 [default=true]; } } message Cursor { required fixed64 cursor = 1; optional string app = 2; } message Error { enum ErrorCode { BAD_REQUEST = 1; CONCURRENT_TRANSACTION = 2; INTERNAL_ERROR = 3; NEED_INDEX = 4; TIMEOUT = 5; PERMISSION_DENIED = 6; BIGTABLE_ERROR = 7; COMMITTED_BUT_STILL_APPLYING = 8; 
CAPABILITY_DISABLED = 9; TRY_ALTERNATE_BACKEND = 10; SAFE_TIME_TOO_OLD = 11; } } message Cost { optional int32 index_writes = 1; optional int32 index_write_bytes = 2; optional int32 entity_writes = 3; optional int32 entity_write_bytes = 4; optional group CommitCost = 5 { optional int32 requested_entity_puts = 6; optional int32 requested_entity_deletes = 7; }; optional int32 approximate_storage_delta = 8; optional int32 id_sequence_updates = 9; } message GetRequest { optional InternalHeader header = 6; repeated Reference key = 1; optional Transaction transaction = 2; optional int64 failover_ms = 3; optional bool strong = 4; optional bool allow_deferred = 5 [default=false]; } message GetResponse { repeated group Entity = 1 { optional EntityProto entity = 2; optional Reference key = 4; optional int64 version = 3; } repeated Reference deferred = 5; optional bool in_order = 6 [default=true]; } message PutRequest { optional InternalHeader header = 11; repeated EntityProto entity = 1; optional Transaction transaction = 2; repeated CompositeIndex composite_index = 3; optional bool trusted = 4 [default = false]; optional bool force = 7 [default = false]; optional bool mark_changes = 8 [default = false]; repeated Snapshot snapshot = 9; enum AutoIdPolicy { CURRENT = 0; SEQUENTIAL = 1; } optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; } message PutResponse { repeated Reference key = 1; optional Cost cost = 2; repeated int64 version = 3; } message TouchRequest { optional InternalHeader header = 10; repeated Reference key = 1; repeated CompositeIndex composite_index = 2; optional bool force = 3 [default = false]; repeated Snapshot snapshot = 9; } message TouchResponse { optional Cost cost = 1; } message DeleteRequest { optional InternalHeader header = 10; repeated Reference key = 6; optional Transaction transaction = 5; optional bool trusted = 4 [default = false]; optional bool force = 7 [default = false]; optional bool mark_changes = 8 [default = false]; repeated Snapshot snapshot = 9; } message DeleteResponse { optional Cost cost = 1; repeated int64 version = 3; } message NextRequest { optional InternalHeader header = 5; required Cursor cursor = 1; optional int32 count = 2; optional int32 offset = 4 [default = 0]; optional bool compile = 3 [default = false]; } message QueryResult { optional Cursor cursor = 1; repeated EntityProto result = 2; optional int32 skipped_results = 7; required bool more_results = 3; optional bool keys_only = 4; optional bool index_only = 9; optional bool small_ops = 10; optional CompiledQuery compiled_query = 5; optional CompiledCursor compiled_cursor = 6; repeated CompositeIndex index = 8; repeated int64 version = 11; } message AllocateIdsRequest { optional InternalHeader header = 4; optional Reference model_key = 1; optional int64 size = 2; optional int64 max = 3; repeated Reference reserve = 5; } message AllocateIdsResponse { required int64 start = 1; required int64 end = 2; optional Cost cost = 3; } message CompositeIndices { repeated CompositeIndex index = 1; } message AddActionsRequest { optional InternalHeader header = 3; required Transaction transaction = 1; repeated Action action = 2; } message AddActionsResponse { } message BeginTransactionRequest { optional InternalHeader header = 3; required string app = 1; optional bool allow_multiple_eg = 2 [default = false]; } message CommitResponse { optional Cost cost = 1; repeated group Version = 3 { required Reference root_entity_key = 4; required int64 version = 5; } } 
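The generated Go code and the .proto definition above pair up in a fixed way: every proto2 optional field becomes a pointer field on the Go struct, a [default = ...] annotation becomes a package-level Default_* constant (for example Default_PutRequest_Force), and each Get* accessor is nil-safe, returning the explicit value when the field is set and the declared default otherwise. A minimal self-contained sketch of that convention follows; the names are hypothetical and the snippet does not use the protobuf runtime, it only reproduces the shape of the generated accessors.

package main

import "fmt"

// putRequest mirrors the shape protoc-gen-go gives a proto2 message:
// "optional bool force = 7 [default = false]" becomes a *bool field,
// a package-level default constant, and a nil-safe getter. These names
// are illustrative only, not part of the vendored package.
type putRequest struct {
    Force *bool
}

const defaultPutRequestForce bool = false

// GetForce may be called on a nil receiver or with the field unset;
// both cases fall back to the declared default, so callers never need
// their own nil checks.
func (m *putRequest) GetForce() bool {
    if m != nil && m.Force != nil {
        return *m.Force
    }
    return defaultPutRequestForce
}

func main() {
    var unset *putRequest
    fmt.Println(unset.GetForce()) // false: nil message yields the default

    f := true
    fmt.Println((&putRequest{Force: &f}).GetForce()) // true: explicit value wins
}

This is why code that consumes these messages can chain getters freely: a missing optional field, or even a nil message, degrades to the proto-declared default rather than panicking.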
================================================ FILE: vendor/google.golang.org/appengine/internal/identity.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package internal import netcontext "golang.org/x/net/context" // These functions are implementations of the wrapper functions // in ../appengine/identity.go. See that file for commentary. func AppID(c netcontext.Context) string { return appID(FullyQualifiedAppID(c)) } ================================================ FILE: vendor/google.golang.org/appengine/internal/identity_classic.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build appengine package internal import ( "appengine" netcontext "golang.org/x/net/context" ) func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(fromContext(ctx)) } func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } func ServerSoftware() string { return appengine.ServerSoftware() } func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } func InstanceID() string { return appengine.InstanceID() } func IsDevAppServer() bool { return appengine.IsDevAppServer() } func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } ================================================ FILE: vendor/google.golang.org/appengine/internal/identity_vm.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build !appengine package internal import ( "net/http" "os" netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions // in ../appengine/identity.go. See that file for commentary. const ( hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" hRequestLogId = "X-AppEngine-Request-Log-Id" hDatacenter = "X-AppEngine-Datacenter" ) func ctxHeaders(ctx netcontext.Context) http.Header { return fromContext(ctx).Request().Header } func DefaultVersionHostname(ctx netcontext.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } func RequestID(ctx netcontext.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } func Datacenter(ctx netcontext.Context) string { return ctxHeaders(ctx).Get(hDatacenter) } func ServerSoftware() string { // TODO(dsymonds): Remove fallback when we've verified this. if s := os.Getenv("SERVER_SOFTWARE"); s != "" { return s } return "Google App Engine/1.x.x" } // TODO(dsymonds): Remove the metadata fetches. func ModuleName(_ netcontext.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } return string(mustGetMetadata("instance/attributes/gae_backend_name")) } func VersionID(_ netcontext.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." 
+ s2 } return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) } func InstanceID() string { if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { return s } return string(mustGetMetadata("instance/attributes/gae_backend_instance")) } func partitionlessAppID() string { // gae_project has everything except the partition prefix. appID := os.Getenv("GAE_LONG_APP_ID") if appID == "" { appID = string(mustGetMetadata("instance/attributes/gae_project")) } return appID } func fullyQualifiedAppID(_ netcontext.Context) string { appID := partitionlessAppID() part := os.Getenv("GAE_PARTITION") if part == "" { part = string(mustGetMetadata("instance/attributes/gae_partition")) } if part != "" { appID = part + "~" + appID } return appID } func IsDevAppServer() bool { return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" } ================================================ FILE: vendor/google.golang.org/appengine/internal/image/images_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/image/images_service.proto // DO NOT EDIT! /* Package image is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/image/images_service.proto It has these top-level messages: ImagesServiceError ImagesServiceTransform Transform ImageData InputSettings OutputSettings ImagesTransformRequest ImagesTransformResponse CompositeImageOptions ImagesCanvas ImagesCompositeRequest ImagesCompositeResponse ImagesHistogramRequest ImagesHistogram ImagesHistogramResponse ImagesGetUrlBaseRequest ImagesGetUrlBaseResponse ImagesDeleteUrlBaseRequest ImagesDeleteUrlBaseResponse */ package image import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type ImagesServiceError_ErrorCode int32 const ( ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1 ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2 ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3 ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4 ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5 ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6 ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7 ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8 ) var ImagesServiceError_ErrorCode_name = map[int32]string{ 1: "UNSPECIFIED_ERROR", 2: "BAD_TRANSFORM_DATA", 3: "NOT_IMAGE", 4: "BAD_IMAGE_DATA", 5: "IMAGE_TOO_LARGE", 6: "INVALID_BLOB_KEY", 7: "ACCESS_DENIED", 8: "OBJECT_NOT_FOUND", } var ImagesServiceError_ErrorCode_value = map[string]int32{ "UNSPECIFIED_ERROR": 1, "BAD_TRANSFORM_DATA": 2, "NOT_IMAGE": 3, "BAD_IMAGE_DATA": 4, "IMAGE_TOO_LARGE": 5, "INVALID_BLOB_KEY": 6, "ACCESS_DENIED": 7, "OBJECT_NOT_FOUND": 8, } func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode { p := new(ImagesServiceError_ErrorCode) *p = x return p } func (x ImagesServiceError_ErrorCode) String() string { return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x)) } func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode") if err != nil { return err } *x = ImagesServiceError_ErrorCode(value) return nil } type ImagesServiceTransform_Type int32 const ( ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1 ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2 ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3 ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4 ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5 ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6 ) var ImagesServiceTransform_Type_name = map[int32]string{ 1: "RESIZE", 2: "ROTATE", 3: "HORIZONTAL_FLIP", 4: "VERTICAL_FLIP", 5: "CROP", 6: "IM_FEELING_LUCKY", } var ImagesServiceTransform_Type_value = map[string]int32{ "RESIZE": 1, "ROTATE": 2, "HORIZONTAL_FLIP": 3, "VERTICAL_FLIP": 4, "CROP": 5, "IM_FEELING_LUCKY": 6, } func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type { p := new(ImagesServiceTransform_Type) *p = x return p } func (x ImagesServiceTransform_Type) String() string { return proto.EnumName(ImagesServiceTransform_Type_name, int32(x)) } func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type") if err != nil { return err } *x = ImagesServiceTransform_Type(value) return nil } type InputSettings_ORIENTATION_CORRECTION_TYPE int32 const ( InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0 InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1 ) var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{ 0: "UNCHANGED_ORIENTATION", 1: "CORRECT_ORIENTATION", } var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{ "UNCHANGED_ORIENTATION": 0, "CORRECT_ORIENTATION": 1, } func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE { p := 
new(InputSettings_ORIENTATION_CORRECTION_TYPE) *p = x return p } func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string { return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x)) } func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE") if err != nil { return err } *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value) return nil } type OutputSettings_MIME_TYPE int32 const ( OutputSettings_PNG OutputSettings_MIME_TYPE = 0 OutputSettings_JPEG OutputSettings_MIME_TYPE = 1 OutputSettings_WEBP OutputSettings_MIME_TYPE = 2 ) var OutputSettings_MIME_TYPE_name = map[int32]string{ 0: "PNG", 1: "JPEG", 2: "WEBP", } var OutputSettings_MIME_TYPE_value = map[string]int32{ "PNG": 0, "JPEG": 1, "WEBP": 2, } func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE { p := new(OutputSettings_MIME_TYPE) *p = x return p } func (x OutputSettings_MIME_TYPE) String() string { return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x)) } func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE") if err != nil { return err } *x = OutputSettings_MIME_TYPE(value) return nil } type CompositeImageOptions_ANCHOR int32 const ( CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0 CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1 CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2 CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3 CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4 CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5 CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6 CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7 CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8 ) var CompositeImageOptions_ANCHOR_name = map[int32]string{ 0: "TOP_LEFT", 1: "TOP", 2: "TOP_RIGHT", 3: "LEFT", 4: "CENTER", 5: "RIGHT", 6: "BOTTOM_LEFT", 7: "BOTTOM", 8: "BOTTOM_RIGHT", } var CompositeImageOptions_ANCHOR_value = map[string]int32{ "TOP_LEFT": 0, "TOP": 1, "TOP_RIGHT": 2, "LEFT": 3, "CENTER": 4, "RIGHT": 5, "BOTTOM_LEFT": 6, "BOTTOM": 7, "BOTTOM_RIGHT": 8, } func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR { p := new(CompositeImageOptions_ANCHOR) *p = x return p } func (x CompositeImageOptions_ANCHOR) String() string { return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x)) } func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR") if err != nil { return err } *x = CompositeImageOptions_ANCHOR(value) return nil } type ImagesServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} } func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) } func (*ImagesServiceError) ProtoMessage() {} type ImagesServiceTransform struct { XXX_unrecognized []byte `json:"-"` } func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} } func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) } func (*ImagesServiceTransform) ProtoMessage() {} type Transform struct { Width *int32 `protobuf:"varint,1,opt,name=width" 
json:"width,omitempty"` Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"` CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"` CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"` Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"` HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"` VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"` CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"` CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"` CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"` CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"` Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"` AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Transform) Reset() { *m = Transform{} } func (m *Transform) String() string { return proto.CompactTextString(m) } func (*Transform) ProtoMessage() {} const Default_Transform_CropToFit bool = false const Default_Transform_CropOffsetX float32 = 0.5 const Default_Transform_CropOffsetY float32 = 0.5 const Default_Transform_Rotate int32 = 0 const Default_Transform_HorizontalFlip bool = false const Default_Transform_VerticalFlip bool = false const Default_Transform_CropLeftX float32 = 0 const Default_Transform_CropTopY float32 = 0 const Default_Transform_CropRightX float32 = 1 const Default_Transform_CropBottomY float32 = 1 const Default_Transform_Autolevels bool = false const Default_Transform_AllowStretch bool = false func (m *Transform) GetWidth() int32 { if m != nil && m.Width != nil { return *m.Width } return 0 } func (m *Transform) GetHeight() int32 { if m != nil && m.Height != nil { return *m.Height } return 0 } func (m *Transform) GetCropToFit() bool { if m != nil && m.CropToFit != nil { return *m.CropToFit } return Default_Transform_CropToFit } func (m *Transform) GetCropOffsetX() float32 { if m != nil && m.CropOffsetX != nil { return *m.CropOffsetX } return Default_Transform_CropOffsetX } func (m *Transform) GetCropOffsetY() float32 { if m != nil && m.CropOffsetY != nil { return *m.CropOffsetY } return Default_Transform_CropOffsetY } func (m *Transform) GetRotate() int32 { if m != nil && m.Rotate != nil { return *m.Rotate } return Default_Transform_Rotate } func (m *Transform) GetHorizontalFlip() bool { if m != nil && m.HorizontalFlip != nil { return *m.HorizontalFlip } return Default_Transform_HorizontalFlip } func (m *Transform) GetVerticalFlip() bool { if m != nil && m.VerticalFlip != nil { return *m.VerticalFlip } return Default_Transform_VerticalFlip } func (m *Transform) GetCropLeftX() float32 { if m != nil && m.CropLeftX != nil { return *m.CropLeftX } return Default_Transform_CropLeftX } func (m *Transform) GetCropTopY() float32 { if m != nil && m.CropTopY != nil { return *m.CropTopY } return Default_Transform_CropTopY } func (m *Transform) GetCropRightX() float32 { if m != nil && m.CropRightX != nil { return *m.CropRightX } return Default_Transform_CropRightX } 
func (m *Transform) GetCropBottomY() float32 { if m != nil && m.CropBottomY != nil { return *m.CropBottomY } return Default_Transform_CropBottomY } func (m *Transform) GetAutolevels() bool { if m != nil && m.Autolevels != nil { return *m.Autolevels } return Default_Transform_Autolevels } func (m *Transform) GetAllowStretch() bool { if m != nil && m.AllowStretch != nil { return *m.AllowStretch } return Default_Transform_AllowStretch } type ImageData struct { Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"` BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"` Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"` Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImageData) Reset() { *m = ImageData{} } func (m *ImageData) String() string { return proto.CompactTextString(m) } func (*ImageData) ProtoMessage() {} func (m *ImageData) GetContent() []byte { if m != nil { return m.Content } return nil } func (m *ImageData) GetBlobKey() string { if m != nil && m.BlobKey != nil { return *m.BlobKey } return "" } func (m *ImageData) GetWidth() int32 { if m != nil && m.Width != nil { return *m.Width } return 0 } func (m *ImageData) GetHeight() int32 { if m != nil && m.Height != nil { return *m.Height } return 0 } type InputSettings struct { CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"` ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"` TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *InputSettings) Reset() { *m = InputSettings{} } func (m *InputSettings) String() string { return proto.CompactTextString(m) } func (*InputSettings) ProtoMessage() {} const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION const Default_InputSettings_ParseMetadata bool = false func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE { if m != nil && m.CorrectExifOrientation != nil { return *m.CorrectExifOrientation } return Default_InputSettings_CorrectExifOrientation } func (m *InputSettings) GetParseMetadata() bool { if m != nil && m.ParseMetadata != nil { return *m.ParseMetadata } return Default_InputSettings_ParseMetadata } func (m *InputSettings) GetTransparentSubstitutionRgb() int32 { if m != nil && m.TransparentSubstitutionRgb != nil { return *m.TransparentSubstitutionRgb } return 0 } type OutputSettings struct { MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"` Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *OutputSettings) Reset() { *m = OutputSettings{} } func (m *OutputSettings) String() string { return proto.CompactTextString(m) } func (*OutputSettings) ProtoMessage() {} const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE { if m != nil && m.MimeType != nil { return *m.MimeType } return Default_OutputSettings_MimeType } func (m 
*OutputSettings) GetQuality() int32 { if m != nil && m.Quality != nil { return *m.Quality } return 0 } type ImagesTransformRequest struct { Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"` Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} } func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) } func (*ImagesTransformRequest) ProtoMessage() {} func (m *ImagesTransformRequest) GetImage() *ImageData { if m != nil { return m.Image } return nil } func (m *ImagesTransformRequest) GetTransform() []*Transform { if m != nil { return m.Transform } return nil } func (m *ImagesTransformRequest) GetOutput() *OutputSettings { if m != nil { return m.Output } return nil } func (m *ImagesTransformRequest) GetInput() *InputSettings { if m != nil { return m.Input } return nil } type ImagesTransformResponse struct { Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} } func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) } func (*ImagesTransformResponse) ProtoMessage() {} func (m *ImagesTransformResponse) GetImage() *ImageData { if m != nil { return m.Image } return nil } func (m *ImagesTransformResponse) GetSourceMetadata() string { if m != nil && m.SourceMetadata != nil { return *m.SourceMetadata } return "" } type CompositeImageOptions struct { SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"` XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"` YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"` Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"` Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} } func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) } func (*CompositeImageOptions) ProtoMessage() {} func (m *CompositeImageOptions) GetSourceIndex() int32 { if m != nil && m.SourceIndex != nil { return *m.SourceIndex } return 0 } func (m *CompositeImageOptions) GetXOffset() int32 { if m != nil && m.XOffset != nil { return *m.XOffset } return 0 } func (m *CompositeImageOptions) GetYOffset() int32 { if m != nil && m.YOffset != nil { return *m.YOffset } return 0 } func (m *CompositeImageOptions) GetOpacity() float32 { if m != nil && m.Opacity != nil { return *m.Opacity } return 0 } func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR { if m != nil && m.Anchor != nil { return *m.Anchor } return CompositeImageOptions_TOP_LEFT } type ImagesCanvas struct { Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"` Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"` Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` Color *int32 
`protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} } func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) } func (*ImagesCanvas) ProtoMessage() {} const Default_ImagesCanvas_Color int32 = -1 func (m *ImagesCanvas) GetWidth() int32 { if m != nil && m.Width != nil { return *m.Width } return 0 } func (m *ImagesCanvas) GetHeight() int32 { if m != nil && m.Height != nil { return *m.Height } return 0 } func (m *ImagesCanvas) GetOutput() *OutputSettings { if m != nil { return m.Output } return nil } func (m *ImagesCanvas) GetColor() int32 { if m != nil && m.Color != nil { return *m.Color } return Default_ImagesCanvas_Color } type ImagesCompositeRequest struct { Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"` Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} } func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) } func (*ImagesCompositeRequest) ProtoMessage() {} func (m *ImagesCompositeRequest) GetImage() []*ImageData { if m != nil { return m.Image } return nil } func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions { if m != nil { return m.Options } return nil } func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas { if m != nil { return m.Canvas } return nil } type ImagesCompositeResponse struct { Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} } func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) } func (*ImagesCompositeResponse) ProtoMessage() {} func (m *ImagesCompositeResponse) GetImage() *ImageData { if m != nil { return m.Image } return nil } type ImagesHistogramRequest struct { Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} } func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) } func (*ImagesHistogramRequest) ProtoMessage() {} func (m *ImagesHistogramRequest) GetImage() *ImageData { if m != nil { return m.Image } return nil } type ImagesHistogram struct { Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"` Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"` Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} } func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) } func (*ImagesHistogram) ProtoMessage() {} func (m *ImagesHistogram) GetRed() []int32 { if m != nil { return m.Red } return nil } func (m *ImagesHistogram) GetGreen() []int32 { if m != nil { return m.Green } return nil } func (m *ImagesHistogram) GetBlue() []int32 { if m != nil { return m.Blue } return nil } type ImagesHistogramResponse struct { Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} } func (m 
*ImagesHistogramResponse) String() string { return proto.CompactTextString(m) } func (*ImagesHistogramResponse) ProtoMessage() {} func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram { if m != nil { return m.Histogram } return nil } type ImagesGetUrlBaseRequest struct { BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} } func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) } func (*ImagesGetUrlBaseRequest) ProtoMessage() {} const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false func (m *ImagesGetUrlBaseRequest) GetBlobKey() string { if m != nil && m.BlobKey != nil { return *m.BlobKey } return "" } func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool { if m != nil && m.CreateSecureUrl != nil { return *m.CreateSecureUrl } return Default_ImagesGetUrlBaseRequest_CreateSecureUrl } type ImagesGetUrlBaseResponse struct { Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} } func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) } func (*ImagesGetUrlBaseResponse) ProtoMessage() {} func (m *ImagesGetUrlBaseResponse) GetUrl() string { if m != nil && m.Url != nil { return *m.Url } return "" } type ImagesDeleteUrlBaseRequest struct { BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} } func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) } func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {} func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string { if m != nil && m.BlobKey != nil { return *m.BlobKey } return "" } type ImagesDeleteUrlBaseResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} } func (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) } func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {} func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/image/images_service.proto ================================================ syntax = "proto2"; option go_package = "image"; package appengine; message ImagesServiceError { enum ErrorCode { UNSPECIFIED_ERROR = 1; BAD_TRANSFORM_DATA = 2; NOT_IMAGE = 3; BAD_IMAGE_DATA = 4; IMAGE_TOO_LARGE = 5; INVALID_BLOB_KEY = 6; ACCESS_DENIED = 7; OBJECT_NOT_FOUND = 8; } } message ImagesServiceTransform { enum Type { RESIZE = 1; ROTATE = 2; HORIZONTAL_FLIP = 3; VERTICAL_FLIP = 4; CROP = 5; IM_FEELING_LUCKY = 6; } } message Transform { optional int32 width = 1; optional int32 height = 2; optional bool crop_to_fit = 11 [default = false]; optional float crop_offset_x = 12 [default = 0.5]; optional float crop_offset_y = 13 [default = 0.5]; optional int32 rotate = 3 [default = 0]; optional bool horizontal_flip = 4 [default = false]; optional bool vertical_flip = 5 [default = false]; optional float crop_left_x = 6 [default = 0.0]; optional float crop_top_y = 7 [default = 0.0]; optional float crop_right_x = 8 [default = 1.0]; optional float 
crop_bottom_y = 9 [default = 1.0]; optional bool autolevels = 10 [default = false]; optional bool allow_stretch = 14 [default = false]; } message ImageData { required bytes content = 1 [ctype=CORD]; optional string blob_key = 2; optional int32 width = 3; optional int32 height = 4; } message InputSettings { enum ORIENTATION_CORRECTION_TYPE { UNCHANGED_ORIENTATION = 0; CORRECT_ORIENTATION = 1; } optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1 [default=UNCHANGED_ORIENTATION]; optional bool parse_metadata = 2 [default=false]; optional int32 transparent_substitution_rgb = 3; } message OutputSettings { enum MIME_TYPE { PNG = 0; JPEG = 1; WEBP = 2; } optional MIME_TYPE mime_type = 1 [default=PNG]; optional int32 quality = 2; } message ImagesTransformRequest { required ImageData image = 1; repeated Transform transform = 2; required OutputSettings output = 3; optional InputSettings input = 4; } message ImagesTransformResponse { required ImageData image = 1; optional string source_metadata = 2; } message CompositeImageOptions { required int32 source_index = 1; required int32 x_offset = 2; required int32 y_offset = 3; required float opacity = 4; enum ANCHOR { TOP_LEFT = 0; TOP = 1; TOP_RIGHT = 2; LEFT = 3; CENTER = 4; RIGHT = 5; BOTTOM_LEFT = 6; BOTTOM = 7; BOTTOM_RIGHT = 8; } required ANCHOR anchor = 5; } message ImagesCanvas { required int32 width = 1; required int32 height = 2; required OutputSettings output = 3; optional int32 color = 4 [default=-1]; } message ImagesCompositeRequest { repeated ImageData image = 1; repeated CompositeImageOptions options = 2; required ImagesCanvas canvas = 3; } message ImagesCompositeResponse { required ImageData image = 1; } message ImagesHistogramRequest { required ImageData image = 1; } message ImagesHistogram { repeated int32 red = 1; repeated int32 green = 2; repeated int32 blue = 3; } message ImagesHistogramResponse { required ImagesHistogram histogram = 1; } message ImagesGetUrlBaseRequest { required string blob_key = 1; optional bool create_secure_url = 2 [default = false]; } message ImagesGetUrlBaseResponse { required string url = 1; } message ImagesDeleteUrlBaseRequest { required string blob_key = 1; } message ImagesDeleteUrlBaseResponse { } ================================================ FILE: vendor/google.golang.org/appengine/internal/internal.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package internal provides support for package appengine. // // Programs should not use this package directly. Its API is not stable. // Use packages appengine and appengine/* instead. package internal import ( "fmt" "io" "log" "net/http" "net/url" "os" "github.com/golang/protobuf/proto" remotepb "google.golang.org/appengine/internal/remote_api" ) // errorCodeMaps is a map of service name to the error code map for the service. var errorCodeMaps = make(map[string]map[int32]string) // RegisterErrorCodeMap is called from API implementations to register their // error code map. This should only be called from init functions. func RegisterErrorCodeMap(service string, m map[int32]string) { errorCodeMaps[service] = m } type timeoutCodeKey struct { service string code int32 } // timeoutCodes is the set of service+code pairs that represent timeouts. 
var timeoutCodes = make(map[timeoutCodeKey]bool) func RegisterTimeoutErrorCode(service string, code int32) { timeoutCodes[timeoutCodeKey{service, code}] = true } // APIError is the type returned by appengine.Context's Call method // when an API call fails in an API-specific way. This may be, for instance, // a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. type APIError struct { Service string Detail string Code int32 // API-specific error code } func (e *APIError) Error() string { if e.Code == 0 { if e.Detail == "" { return "APIError " } return e.Detail } s := fmt.Sprintf("API error %d", e.Code) if m, ok := errorCodeMaps[e.Service]; ok { s += " (" + e.Service + ": " + m[e.Code] + ")" } else { // Shouldn't happen, but provide a bit more detail if it does. s = e.Service + " " + s } if e.Detail != "" { s += ": " + e.Detail } return s } func (e *APIError) IsTimeout() bool { return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] } // CallError is the type returned by appengine.Context's Call method when an // API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. type CallError struct { Detail string Code int32 // TODO: Remove this if we get a distinguishable error code. Timeout bool } func (e *CallError) Error() string { var msg string switch remotepb.RpcError_ErrorCode(e.Code) { case remotepb.RpcError_UNKNOWN: return e.Detail case remotepb.RpcError_OVER_QUOTA: msg = "Over quota" case remotepb.RpcError_CAPABILITY_DISABLED: msg = "Capability disabled" case remotepb.RpcError_CANCELLED: msg = "Canceled" default: msg = fmt.Sprintf("Call error %d", e.Code) } s := msg + ": " + e.Detail if e.Timeout { s += " (timeout)" } return s } func (e *CallError) IsTimeout() bool { return e.Timeout } func Main() { installHealthChecker(http.DefaultServeMux) port := "8080" if s := os.Getenv("PORT"); s != "" { port = s } if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } func installHealthChecker(mux *http.ServeMux) { // If no health check handler has been installed by this point, add a trivial one. const healthPath = "/_ah/health" hreq := &http.Request{ Method: "GET", URL: &url.URL{ Path: healthPath, }, } if _, pat := mux.Handler(hreq); pat != healthPath { mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "ok") }) } } // NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. // The function should be prepared to be called on the same message more than once; it should only modify the // RPC request the first time. var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) ================================================ FILE: vendor/google.golang.org/appengine/internal/log/log_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/log/log_service.proto // DO NOT EDIT! /* Package log is a generated protocol buffer package. 
It is generated from these files: google.golang.org/appengine/internal/log/log_service.proto It has these top-level messages: LogServiceError UserAppLogLine UserAppLogGroup FlushRequest SetStatusRequest LogOffset LogLine RequestLog LogModuleVersion LogReadRequest LogReadResponse LogUsageRecord LogUsageRequest LogUsageResponse */ package log import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type LogServiceError_ErrorCode int32 const ( LogServiceError_OK LogServiceError_ErrorCode = 0 LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 ) var LogServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INVALID_REQUEST", 2: "STORAGE_ERROR", } var LogServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INVALID_REQUEST": 1, "STORAGE_ERROR": 2, } func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { p := new(LogServiceError_ErrorCode) *p = x return p } func (x LogServiceError_ErrorCode) String() string { return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) } func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") if err != nil { return err } *x = LogServiceError_ErrorCode(value) return nil } type LogServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *LogServiceError) Reset() { *m = LogServiceError{} } func (m *LogServiceError) String() string { return proto.CompactTextString(m) } func (*LogServiceError) ProtoMessage() {} type UserAppLogLine struct { TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } func (*UserAppLogLine) ProtoMessage() {} func (m *UserAppLogLine) GetTimestampUsec() int64 { if m != nil && m.TimestampUsec != nil { return *m.TimestampUsec } return 0 } func (m *UserAppLogLine) GetLevel() int64 { if m != nil && m.Level != nil { return *m.Level } return 0 } func (m *UserAppLogLine) GetMessage() string { if m != nil && m.Message != nil { return *m.Message } return "" } type UserAppLogGroup struct { LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } func (*UserAppLogGroup) ProtoMessage() {} func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { if m != nil { return m.LogLine } return nil } type FlushRequest struct { Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FlushRequest) Reset() { *m = FlushRequest{} } func (m *FlushRequest) String() string { return proto.CompactTextString(m) } func (*FlushRequest) ProtoMessage() {} func (m *FlushRequest) GetLogs() []byte { if m != nil { return m.Logs } return nil } type SetStatusRequest struct { Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` XXX_unrecognized []byte 
`json:"-"` } func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } func (*SetStatusRequest) ProtoMessage() {} func (m *SetStatusRequest) GetStatus() string { if m != nil && m.Status != nil { return *m.Status } return "" } type LogOffset struct { RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LogOffset) Reset() { *m = LogOffset{} } func (m *LogOffset) String() string { return proto.CompactTextString(m) } func (*LogOffset) ProtoMessage() {} func (m *LogOffset) GetRequestId() []byte { if m != nil { return m.RequestId } return nil } type LogLine struct { Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LogLine) Reset() { *m = LogLine{} } func (m *LogLine) String() string { return proto.CompactTextString(m) } func (*LogLine) ProtoMessage() {} func (m *LogLine) GetTime() int64 { if m != nil && m.Time != nil { return *m.Time } return 0 } func (m *LogLine) GetLevel() int32 { if m != nil && m.Level != nil { return *m.Level } return 0 } func (m *LogLine) GetLogMessage() string { if m != nil && m.LogMessage != nil { return *m.LogMessage } return "" } type RequestLog struct { AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` TaskName *string 
`protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *RequestLog) Reset() { *m = RequestLog{} } func (m *RequestLog) String() string { return proto.CompactTextString(m) } func (*RequestLog) ProtoMessage() {} const Default_RequestLog_ModuleId string = "default" const Default_RequestLog_ReplicaIndex int32 = -1 const Default_RequestLog_Finished bool = true func (m *RequestLog) GetAppId() string { if m != nil && m.AppId != nil { return *m.AppId } return "" } func (m *RequestLog) GetModuleId() string { if m != nil && m.ModuleId != nil { return *m.ModuleId } return Default_RequestLog_ModuleId } func (m *RequestLog) GetVersionId() string { if m != nil && m.VersionId != nil { return *m.VersionId } return "" } func (m *RequestLog) GetRequestId() []byte { if m != nil { return m.RequestId } return nil } func (m *RequestLog) GetOffset() *LogOffset { if m != nil { return m.Offset } return nil } func (m *RequestLog) GetIp() string { if m != nil && m.Ip != nil { return *m.Ip } return "" } func (m *RequestLog) GetNickname() string { if m != nil && m.Nickname != nil { return *m.Nickname } return "" } func (m *RequestLog) GetStartTime() int64 { if m != nil && m.StartTime != nil { return *m.StartTime } return 0 } func (m *RequestLog) GetEndTime() int64 { if m != nil && m.EndTime != nil { return *m.EndTime } return 0 } func (m *RequestLog) GetLatency() int64 { if m != nil && m.Latency != nil { return *m.Latency } return 0 } func (m *RequestLog) GetMcycles() int64 { if m != nil && m.Mcycles != nil { return *m.Mcycles } return 0 } func (m *RequestLog) GetMethod() string { if m != nil && m.Method != nil { return *m.Method } return "" } func (m *RequestLog) GetResource() string { if m != nil && m.Resource != nil { return *m.Resource } return "" } func (m *RequestLog) GetHttpVersion() string { if m != nil && m.HttpVersion != nil { return *m.HttpVersion } return "" } func (m *RequestLog) GetStatus() int32 { if m != nil && m.Status != nil { return *m.Status } return 0 } func (m *RequestLog) GetResponseSize() int64 { if m != nil && m.ResponseSize != nil { return *m.ResponseSize } return 0 } func (m *RequestLog) GetReferrer() string { if m != nil && m.Referrer != nil { return 
*m.Referrer } return "" } func (m *RequestLog) GetUserAgent() string { if m != nil && m.UserAgent != nil { return *m.UserAgent } return "" } func (m *RequestLog) GetUrlMapEntry() string { if m != nil && m.UrlMapEntry != nil { return *m.UrlMapEntry } return "" } func (m *RequestLog) GetCombined() string { if m != nil && m.Combined != nil { return *m.Combined } return "" } func (m *RequestLog) GetApiMcycles() int64 { if m != nil && m.ApiMcycles != nil { return *m.ApiMcycles } return 0 } func (m *RequestLog) GetHost() string { if m != nil && m.Host != nil { return *m.Host } return "" } func (m *RequestLog) GetCost() float64 { if m != nil && m.Cost != nil { return *m.Cost } return 0 } func (m *RequestLog) GetTaskQueueName() string { if m != nil && m.TaskQueueName != nil { return *m.TaskQueueName } return "" } func (m *RequestLog) GetTaskName() string { if m != nil && m.TaskName != nil { return *m.TaskName } return "" } func (m *RequestLog) GetWasLoadingRequest() bool { if m != nil && m.WasLoadingRequest != nil { return *m.WasLoadingRequest } return false } func (m *RequestLog) GetPendingTime() int64 { if m != nil && m.PendingTime != nil { return *m.PendingTime } return 0 } func (m *RequestLog) GetReplicaIndex() int32 { if m != nil && m.ReplicaIndex != nil { return *m.ReplicaIndex } return Default_RequestLog_ReplicaIndex } func (m *RequestLog) GetFinished() bool { if m != nil && m.Finished != nil { return *m.Finished } return Default_RequestLog_Finished } func (m *RequestLog) GetCloneKey() []byte { if m != nil { return m.CloneKey } return nil } func (m *RequestLog) GetLine() []*LogLine { if m != nil { return m.Line } return nil } func (m *RequestLog) GetLinesIncomplete() bool { if m != nil && m.LinesIncomplete != nil { return *m.LinesIncomplete } return false } func (m *RequestLog) GetAppEngineRelease() []byte { if m != nil { return m.AppEngineRelease } return nil } func (m *RequestLog) GetExitReason() int32 { if m != nil && m.ExitReason != nil { return *m.ExitReason } return 0 } func (m *RequestLog) GetWasThrottledForTime() bool { if m != nil && m.WasThrottledForTime != nil { return *m.WasThrottledForTime } return false } func (m *RequestLog) GetWasThrottledForRequests() bool { if m != nil && m.WasThrottledForRequests != nil { return *m.WasThrottledForRequests } return false } func (m *RequestLog) GetThrottledTime() int64 { if m != nil && m.ThrottledTime != nil { return *m.ThrottledTime } return 0 } func (m *RequestLog) GetServerName() []byte { if m != nil { return m.ServerName } return nil } type LogModuleVersion struct { ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } func (*LogModuleVersion) ProtoMessage() {} const Default_LogModuleVersion_ModuleId string = "default" func (m *LogModuleVersion) GetModuleId() string { if m != nil && m.ModuleId != nil { return *m.ModuleId } return Default_LogModuleVersion_ModuleId } func (m *LogModuleVersion) GetVersionId() string { if m != nil && m.VersionId != nil { return *m.VersionId } return "" } type LogReadRequest struct { AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` ModuleVersion []*LogModuleVersion 
`protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } func (*LogReadRequest) ProtoMessage() {} func (m *LogReadRequest) GetAppId() string { if m != nil && m.AppId != nil { return *m.AppId } return "" } func (m *LogReadRequest) GetVersionId() []string { if m != nil { return m.VersionId } return nil } func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { if m != nil { return m.ModuleVersion } return nil } func (m *LogReadRequest) GetStartTime() int64 { if m != nil && m.StartTime != nil { return *m.StartTime } return 0 } func (m *LogReadRequest) GetEndTime() int64 { if m != nil && m.EndTime != nil { return *m.EndTime } return 0 } func (m *LogReadRequest) GetOffset() *LogOffset { if m != nil { return m.Offset } return nil } func (m *LogReadRequest) GetRequestId() [][]byte { if m != nil { return m.RequestId } return nil } func (m *LogReadRequest) GetMinimumLogLevel() int32 { if m != nil && m.MinimumLogLevel != nil { return *m.MinimumLogLevel } return 0 } func (m *LogReadRequest) GetIncludeIncomplete() bool { if m != nil && m.IncludeIncomplete != nil { return *m.IncludeIncomplete } return false } func (m *LogReadRequest) GetCount() int64 { if m != nil && m.Count != nil { return *m.Count } return 0 } func (m *LogReadRequest) GetCombinedLogRegex() string { if m != nil && m.CombinedLogRegex != nil { return *m.CombinedLogRegex } return "" } func (m *LogReadRequest) GetHostRegex() string { if m != nil && m.HostRegex != nil { return *m.HostRegex } return "" } func (m *LogReadRequest) GetReplicaIndex() int32 { if m != nil && m.ReplicaIndex != nil { return *m.ReplicaIndex } return 0 } func (m *LogReadRequest) GetIncludeAppLogs() bool { if m != nil && m.IncludeAppLogs != nil { return *m.IncludeAppLogs } return false } func (m *LogReadRequest) GetAppLogsPerRequest() int32 { if m != nil && m.AppLogsPerRequest != nil { 
return *m.AppLogsPerRequest } return 0 } func (m *LogReadRequest) GetIncludeHost() bool { if m != nil && m.IncludeHost != nil { return *m.IncludeHost } return false } func (m *LogReadRequest) GetIncludeAll() bool { if m != nil && m.IncludeAll != nil { return *m.IncludeAll } return false } func (m *LogReadRequest) GetCacheIterator() bool { if m != nil && m.CacheIterator != nil { return *m.CacheIterator } return false } func (m *LogReadRequest) GetNumShards() int32 { if m != nil && m.NumShards != nil { return *m.NumShards } return 0 } type LogReadResponse struct { Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } func (*LogReadResponse) ProtoMessage() {} func (m *LogReadResponse) GetLog() []*RequestLog { if m != nil { return m.Log } return nil } func (m *LogReadResponse) GetOffset() *LogOffset { if m != nil { return m.Offset } return nil } func (m *LogReadResponse) GetLastEndTime() int64 { if m != nil && m.LastEndTime != nil { return *m.LastEndTime } return 0 } type LogUsageRecord struct { VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } func (*LogUsageRecord) ProtoMessage() {} func (m *LogUsageRecord) GetVersionId() string { if m != nil && m.VersionId != nil { return *m.VersionId } return "" } func (m *LogUsageRecord) GetStartTime() int32 { if m != nil && m.StartTime != nil { return *m.StartTime } return 0 } func (m *LogUsageRecord) GetEndTime() int32 { if m != nil && m.EndTime != nil { return *m.EndTime } return 0 } func (m *LogUsageRecord) GetCount() int64 { if m != nil && m.Count != nil { return *m.Count } return 0 } func (m *LogUsageRecord) GetTotalSize() int64 { if m != nil && m.TotalSize != nil { return *m.TotalSize } return 0 } func (m *LogUsageRecord) GetRecords() int32 { if m != nil && m.Records != nil { return *m.Records } return 0 } type LogUsageRequest struct { AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` XXX_unrecognized 
[]byte `json:"-"` } func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } func (*LogUsageRequest) ProtoMessage() {} const Default_LogUsageRequest_ResolutionHours uint32 = 1 func (m *LogUsageRequest) GetAppId() string { if m != nil && m.AppId != nil { return *m.AppId } return "" } func (m *LogUsageRequest) GetVersionId() []string { if m != nil { return m.VersionId } return nil } func (m *LogUsageRequest) GetStartTime() int32 { if m != nil && m.StartTime != nil { return *m.StartTime } return 0 } func (m *LogUsageRequest) GetEndTime() int32 { if m != nil && m.EndTime != nil { return *m.EndTime } return 0 } func (m *LogUsageRequest) GetResolutionHours() uint32 { if m != nil && m.ResolutionHours != nil { return *m.ResolutionHours } return Default_LogUsageRequest_ResolutionHours } func (m *LogUsageRequest) GetCombineVersions() bool { if m != nil && m.CombineVersions != nil { return *m.CombineVersions } return false } func (m *LogUsageRequest) GetUsageVersion() int32 { if m != nil && m.UsageVersion != nil { return *m.UsageVersion } return 0 } func (m *LogUsageRequest) GetVersionsOnly() bool { if m != nil && m.VersionsOnly != nil { return *m.VersionsOnly } return false } type LogUsageResponse struct { Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } func (*LogUsageResponse) ProtoMessage() {} func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { if m != nil { return m.Usage } return nil } func (m *LogUsageResponse) GetSummary() *LogUsageRecord { if m != nil { return m.Summary } return nil } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/log/log_service.proto ================================================ syntax = "proto2"; option go_package = "log"; package appengine; message LogServiceError { enum ErrorCode { OK = 0; INVALID_REQUEST = 1; STORAGE_ERROR = 2; } } message UserAppLogLine { required int64 timestamp_usec = 1; required int64 level = 2; required string message = 3; } message UserAppLogGroup { repeated UserAppLogLine log_line = 2; } message FlushRequest { optional bytes logs = 1; } message SetStatusRequest { required string status = 1; } message LogOffset { optional bytes request_id = 1; } message LogLine { required int64 time = 1; required int32 level = 2; required string log_message = 3; } message RequestLog { required string app_id = 1; optional string module_id = 37 [default="default"]; required string version_id = 2; required bytes request_id = 3; optional LogOffset offset = 35; required string ip = 4; optional string nickname = 5; required int64 start_time = 6; required int64 end_time = 7; required int64 latency = 8; required int64 mcycles = 9; required string method = 10; required string resource = 11; required string http_version = 12; required int32 status = 13; required int64 response_size = 14; optional string referrer = 15; optional string user_agent = 16; required string url_map_entry = 17; required string combined = 18; optional int64 api_mcycles = 19; optional string host = 20; optional double cost = 21; optional string task_queue_name = 22; optional string task_name = 23; optional bool was_loading_request = 24; optional 
int64 pending_time = 25; optional int32 replica_index = 26 [default = -1]; optional bool finished = 27 [default = true]; optional bytes clone_key = 28; repeated LogLine line = 29; optional bool lines_incomplete = 36; optional bytes app_engine_release = 38; optional int32 exit_reason = 30; optional bool was_throttled_for_time = 31; optional bool was_throttled_for_requests = 32; optional int64 throttled_time = 33; optional bytes server_name = 34; } message LogModuleVersion { optional string module_id = 1 [default="default"]; optional string version_id = 2; } message LogReadRequest { required string app_id = 1; repeated string version_id = 2; repeated LogModuleVersion module_version = 19; optional int64 start_time = 3; optional int64 end_time = 4; optional LogOffset offset = 5; repeated bytes request_id = 6; optional int32 minimum_log_level = 7; optional bool include_incomplete = 8; optional int64 count = 9; optional string combined_log_regex = 14; optional string host_regex = 15; optional int32 replica_index = 16; optional bool include_app_logs = 10; optional int32 app_logs_per_request = 17; optional bool include_host = 11; optional bool include_all = 12; optional bool cache_iterator = 13; optional int32 num_shards = 18; } message LogReadResponse { repeated RequestLog log = 1; optional LogOffset offset = 2; optional int64 last_end_time = 3; } message LogUsageRecord { optional string version_id = 1; optional int32 start_time = 2; optional int32 end_time = 3; optional int64 count = 4; optional int64 total_size = 5; optional int32 records = 6; } message LogUsageRequest { required string app_id = 1; repeated string version_id = 2; optional int32 start_time = 3; optional int32 end_time = 4; optional uint32 resolution_hours = 5 [default = 1]; optional bool combine_versions = 6; optional int32 usage_version = 7; optional bool versions_only = 8; } message LogUsageResponse { repeated LogUsageRecord usage = 1; optional LogUsageRecord summary = 2; } ================================================ FILE: vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/mail/mail_service.proto // DO NOT EDIT! /* Package mail is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/mail/mail_service.proto It has these top-level messages: MailServiceError MailAttachment MailHeader MailMessage */ package mail import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type MailServiceError_ErrorCode int32 const ( MailServiceError_OK MailServiceError_ErrorCode = 0 MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1 MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2 MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3 MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4 MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5 MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6 ) var MailServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INTERNAL_ERROR", 2: "BAD_REQUEST", 3: "UNAUTHORIZED_SENDER", 4: "INVALID_ATTACHMENT_TYPE", 5: "INVALID_HEADER_NAME", 6: "INVALID_CONTENT_ID", } var MailServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INTERNAL_ERROR": 1, "BAD_REQUEST": 2, "UNAUTHORIZED_SENDER": 3, "INVALID_ATTACHMENT_TYPE": 4, "INVALID_HEADER_NAME": 5, "INVALID_CONTENT_ID": 6, } func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode { p := new(MailServiceError_ErrorCode) *p = x return p } func (x MailServiceError_ErrorCode) String() string { return proto.EnumName(MailServiceError_ErrorCode_name, int32(x)) } func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode") if err != nil { return err } *x = MailServiceError_ErrorCode(value) return nil } type MailServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *MailServiceError) Reset() { *m = MailServiceError{} } func (m *MailServiceError) String() string { return proto.CompactTextString(m) } func (*MailServiceError) ProtoMessage() {} type MailAttachment struct { FileName *string `protobuf:"bytes,1,req,name=FileName" json:"FileName,omitempty"` Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"` ContentID *string `protobuf:"bytes,3,opt,name=ContentID" json:"ContentID,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MailAttachment) Reset() { *m = MailAttachment{} } func (m *MailAttachment) String() string { return proto.CompactTextString(m) } func (*MailAttachment) ProtoMessage() {} func (m *MailAttachment) GetFileName() string { if m != nil && m.FileName != nil { return *m.FileName } return "" } func (m *MailAttachment) GetData() []byte { if m != nil { return m.Data } return nil } func (m *MailAttachment) GetContentID() string { if m != nil && m.ContentID != nil { return *m.ContentID } return "" } type MailHeader struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MailHeader) Reset() { *m = MailHeader{} } func (m *MailHeader) String() string { return proto.CompactTextString(m) } func (*MailHeader) ProtoMessage() {} func (m *MailHeader) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *MailHeader) GetValue() string { if m != nil && m.Value != nil { return *m.Value } return "" } type MailMessage struct { Sender *string `protobuf:"bytes,1,req,name=Sender" json:"Sender,omitempty"` ReplyTo *string `protobuf:"bytes,2,opt,name=ReplyTo" json:"ReplyTo,omitempty"` To []string `protobuf:"bytes,3,rep,name=To" json:"To,omitempty"` Cc []string `protobuf:"bytes,4,rep,name=Cc" json:"Cc,omitempty"` Bcc []string `protobuf:"bytes,5,rep,name=Bcc" json:"Bcc,omitempty"` Subject *string `protobuf:"bytes,6,req,name=Subject" 
json:"Subject,omitempty"` TextBody *string `protobuf:"bytes,7,opt,name=TextBody" json:"TextBody,omitempty"` HtmlBody *string `protobuf:"bytes,8,opt,name=HtmlBody" json:"HtmlBody,omitempty"` Attachment []*MailAttachment `protobuf:"bytes,9,rep,name=Attachment" json:"Attachment,omitempty"` Header []*MailHeader `protobuf:"bytes,10,rep,name=Header" json:"Header,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MailMessage) Reset() { *m = MailMessage{} } func (m *MailMessage) String() string { return proto.CompactTextString(m) } func (*MailMessage) ProtoMessage() {} func (m *MailMessage) GetSender() string { if m != nil && m.Sender != nil { return *m.Sender } return "" } func (m *MailMessage) GetReplyTo() string { if m != nil && m.ReplyTo != nil { return *m.ReplyTo } return "" } func (m *MailMessage) GetTo() []string { if m != nil { return m.To } return nil } func (m *MailMessage) GetCc() []string { if m != nil { return m.Cc } return nil } func (m *MailMessage) GetBcc() []string { if m != nil { return m.Bcc } return nil } func (m *MailMessage) GetSubject() string { if m != nil && m.Subject != nil { return *m.Subject } return "" } func (m *MailMessage) GetTextBody() string { if m != nil && m.TextBody != nil { return *m.TextBody } return "" } func (m *MailMessage) GetHtmlBody() string { if m != nil && m.HtmlBody != nil { return *m.HtmlBody } return "" } func (m *MailMessage) GetAttachment() []*MailAttachment { if m != nil { return m.Attachment } return nil } func (m *MailMessage) GetHeader() []*MailHeader { if m != nil { return m.Header } return nil } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/mail/mail_service.proto ================================================ syntax = "proto2"; option go_package = "mail"; package appengine; message MailServiceError { enum ErrorCode { OK = 0; INTERNAL_ERROR = 1; BAD_REQUEST = 2; UNAUTHORIZED_SENDER = 3; INVALID_ATTACHMENT_TYPE = 4; INVALID_HEADER_NAME = 5; INVALID_CONTENT_ID = 6; } } message MailAttachment { required string FileName = 1; required bytes Data = 2; optional string ContentID = 3; } message MailHeader { required string name = 1; required string value = 2; } message MailMessage { required string Sender = 1; optional string ReplyTo = 2; repeated string To = 3; repeated string Cc = 4; repeated string Bcc = 5; required string Subject = 6; optional string TextBody = 7; optional string HtmlBody = 8; repeated MailAttachment Attachment = 9; repeated MailHeader Header = 10; } ================================================ FILE: vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/memcache/memcache_service.proto // DO NOT EDIT! /* Package memcache is a generated protocol buffer package. 
It is generated from these files: google.golang.org/appengine/internal/memcache/memcache_service.proto It has these top-level messages: MemcacheServiceError AppOverride MemcacheGetRequest MemcacheGetResponse MemcacheSetRequest MemcacheSetResponse MemcacheDeleteRequest MemcacheDeleteResponse MemcacheIncrementRequest MemcacheIncrementResponse MemcacheBatchIncrementRequest MemcacheBatchIncrementResponse MemcacheFlushRequest MemcacheFlushResponse MemcacheStatsRequest MergedNamespaceStats MemcacheStatsResponse MemcacheGrabTailRequest MemcacheGrabTailResponse */ package memcache import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type MemcacheServiceError_ErrorCode int32 const ( MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0 MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1 MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2 MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3 MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6 ) var MemcacheServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "UNSPECIFIED_ERROR", 2: "NAMESPACE_NOT_SET", 3: "PERMISSION_DENIED", 6: "INVALID_VALUE", } var MemcacheServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "UNSPECIFIED_ERROR": 1, "NAMESPACE_NOT_SET": 2, "PERMISSION_DENIED": 3, "INVALID_VALUE": 6, } func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode { p := new(MemcacheServiceError_ErrorCode) *p = x return p } func (x MemcacheServiceError_ErrorCode) String() string { return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x)) } func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode") if err != nil { return err } *x = MemcacheServiceError_ErrorCode(value) return nil } type MemcacheSetRequest_SetPolicy int32 const ( MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1 MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2 MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3 MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4 ) var MemcacheSetRequest_SetPolicy_name = map[int32]string{ 1: "SET", 2: "ADD", 3: "REPLACE", 4: "CAS", } var MemcacheSetRequest_SetPolicy_value = map[string]int32{ "SET": 1, "ADD": 2, "REPLACE": 3, "CAS": 4, } func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy { p := new(MemcacheSetRequest_SetPolicy) *p = x return p } func (x MemcacheSetRequest_SetPolicy) String() string { return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x)) } func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy") if err != nil { return err } *x = MemcacheSetRequest_SetPolicy(value) return nil } type MemcacheSetResponse_SetStatusCode int32 const ( MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1 MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2 MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3 MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4 ) var MemcacheSetResponse_SetStatusCode_name = map[int32]string{ 1: "STORED", 2: "NOT_STORED", 3: "ERROR", 4: "EXISTS", } var 
MemcacheSetResponse_SetStatusCode_value = map[string]int32{ "STORED": 1, "NOT_STORED": 2, "ERROR": 3, "EXISTS": 4, } func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode { p := new(MemcacheSetResponse_SetStatusCode) *p = x return p } func (x MemcacheSetResponse_SetStatusCode) String() string { return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x)) } func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode") if err != nil { return err } *x = MemcacheSetResponse_SetStatusCode(value) return nil } type MemcacheDeleteResponse_DeleteStatusCode int32 const ( MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1 MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2 ) var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{ 1: "DELETED", 2: "NOT_FOUND", } var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{ "DELETED": 1, "NOT_FOUND": 2, } func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode { p := new(MemcacheDeleteResponse_DeleteStatusCode) *p = x return p } func (x MemcacheDeleteResponse_DeleteStatusCode) String() string { return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x)) } func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode") if err != nil { return err } *x = MemcacheDeleteResponse_DeleteStatusCode(value) return nil } type MemcacheIncrementRequest_Direction int32 const ( MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1 MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2 ) var MemcacheIncrementRequest_Direction_name = map[int32]string{ 1: "INCREMENT", 2: "DECREMENT", } var MemcacheIncrementRequest_Direction_value = map[string]int32{ "INCREMENT": 1, "DECREMENT": 2, } func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction { p := new(MemcacheIncrementRequest_Direction) *p = x return p } func (x MemcacheIncrementRequest_Direction) String() string { return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x)) } func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction") if err != nil { return err } *x = MemcacheIncrementRequest_Direction(value) return nil } type MemcacheIncrementResponse_IncrementStatusCode int32 const ( MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1 MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2 MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3 ) var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{ 1: "OK", 2: "NOT_CHANGED", 3: "ERROR", } var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{ "OK": 1, "NOT_CHANGED": 2, "ERROR": 3, } func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode { p := new(MemcacheIncrementResponse_IncrementStatusCode) *p = x return p } func (x MemcacheIncrementResponse_IncrementStatusCode) String() string { return 
proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x)) } func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode") if err != nil { return err } *x = MemcacheIncrementResponse_IncrementStatusCode(value) return nil } type MemcacheServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} } func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) } func (*MemcacheServiceError) ProtoMessage() {} type AppOverride struct { AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"` IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"` MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"` MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AppOverride) Reset() { *m = AppOverride{} } func (m *AppOverride) String() string { return proto.CompactTextString(m) } func (*AppOverride) ProtoMessage() {} func (m *AppOverride) GetAppId() string { if m != nil && m.AppId != nil { return *m.AppId } return "" } func (m *AppOverride) GetNumMemcachegBackends() int32 { if m != nil && m.NumMemcachegBackends != nil { return *m.NumMemcachegBackends } return 0 } func (m *AppOverride) GetIgnoreShardlock() bool { if m != nil && m.IgnoreShardlock != nil { return *m.IgnoreShardlock } return false } func (m *AppOverride) GetMemcachePoolHint() string { if m != nil && m.MemcachePoolHint != nil { return *m.MemcachePoolHint } return "" } func (m *AppOverride) GetMemcacheShardingStrategy() []byte { if m != nil { return m.MemcacheShardingStrategy } return nil } type MemcacheGetRequest struct { Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"` Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} } func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) } func (*MemcacheGetRequest) ProtoMessage() {} func (m *MemcacheGetRequest) GetKey() [][]byte { if m != nil { return m.Key } return nil } func (m *MemcacheGetRequest) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *MemcacheGetRequest) GetForCas() bool { if m != nil && m.ForCas != nil { return *m.ForCas } return false } func (m *MemcacheGetRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MemcacheGetResponse struct { Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} } func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheGetResponse) ProtoMessage() {} func (m *MemcacheGetResponse) GetItem() 
[]*MemcacheGetResponse_Item { if m != nil { return m.Item } return nil } type MemcacheGetResponse_Item struct { Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"` ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} } func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) } func (*MemcacheGetResponse_Item) ProtoMessage() {} func (m *MemcacheGetResponse_Item) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *MemcacheGetResponse_Item) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *MemcacheGetResponse_Item) GetFlags() uint32 { if m != nil && m.Flags != nil { return *m.Flags } return 0 } func (m *MemcacheGetResponse_Item) GetCasId() uint64 { if m != nil && m.CasId != nil { return *m.CasId } return 0 } func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 { if m != nil && m.ExpiresInSeconds != nil { return *m.ExpiresInSeconds } return 0 } type MemcacheSetRequest struct { Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"` Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} } func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) } func (*MemcacheSetRequest) ProtoMessage() {} func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item { if m != nil { return m.Item } return nil } func (m *MemcacheSetRequest) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *MemcacheSetRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MemcacheSetRequest_Item struct { Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"` ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"` CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"` ForCas *bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} } func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) } func (*MemcacheSetRequest_Item) ProtoMessage() {} const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0 func (m *MemcacheSetRequest_Item) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *MemcacheSetRequest_Item) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *MemcacheSetRequest_Item) GetFlags() uint32 { 
if m != nil && m.Flags != nil { return *m.Flags } return 0 } func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy { if m != nil && m.SetPolicy != nil { return *m.SetPolicy } return Default_MemcacheSetRequest_Item_SetPolicy } func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 { if m != nil && m.ExpirationTime != nil { return *m.ExpirationTime } return Default_MemcacheSetRequest_Item_ExpirationTime } func (m *MemcacheSetRequest_Item) GetCasId() uint64 { if m != nil && m.CasId != nil { return *m.CasId } return 0 } func (m *MemcacheSetRequest_Item) GetForCas() bool { if m != nil && m.ForCas != nil { return *m.ForCas } return false } type MemcacheSetResponse struct { SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} } func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheSetResponse) ProtoMessage() {} func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode { if m != nil { return m.SetStatus } return nil } type MemcacheDeleteRequest struct { Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} } func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) } func (*MemcacheDeleteRequest) ProtoMessage() {} func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item { if m != nil { return m.Item } return nil } func (m *MemcacheDeleteRequest) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *MemcacheDeleteRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MemcacheDeleteRequest_Item struct { Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} } func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) } func (*MemcacheDeleteRequest_Item) ProtoMessage() {} const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0 func (m *MemcacheDeleteRequest_Item) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 { if m != nil && m.DeleteTime != nil { return *m.DeleteTime } return Default_MemcacheDeleteRequest_Item_DeleteTime } type MemcacheDeleteResponse struct { DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} } func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheDeleteResponse) ProtoMessage() {} func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode { if m != nil { return m.DeleteStatus } 
return nil } type MemcacheIncrementRequest struct { Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"` Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"` InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"` InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"` Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} } func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) } func (*MemcacheIncrementRequest) ProtoMessage() {} const Default_MemcacheIncrementRequest_Delta uint64 = 1 const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT func (m *MemcacheIncrementRequest) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *MemcacheIncrementRequest) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *MemcacheIncrementRequest) GetDelta() uint64 { if m != nil && m.Delta != nil { return *m.Delta } return Default_MemcacheIncrementRequest_Delta } func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction { if m != nil && m.Direction != nil { return *m.Direction } return Default_MemcacheIncrementRequest_Direction } func (m *MemcacheIncrementRequest) GetInitialValue() uint64 { if m != nil && m.InitialValue != nil { return *m.InitialValue } return 0 } func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 { if m != nil && m.InitialFlags != nil { return *m.InitialFlags } return 0 } func (m *MemcacheIncrementRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MemcacheIncrementResponse struct { NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"` IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} } func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheIncrementResponse) ProtoMessage() {} func (m *MemcacheIncrementResponse) GetNewValue() uint64 { if m != nil && m.NewValue != nil { return *m.NewValue } return 0 } func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode { if m != nil && m.IncrementStatus != nil { return *m.IncrementStatus } return MemcacheIncrementResponse_OK } type MemcacheBatchIncrementRequest struct { NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"` Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"` Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} } func (m *MemcacheBatchIncrementRequest) String() 
string { return proto.CompactTextString(m) } func (*MemcacheBatchIncrementRequest) ProtoMessage() {} func (m *MemcacheBatchIncrementRequest) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest { if m != nil { return m.Item } return nil } func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MemcacheBatchIncrementResponse struct { Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} } func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheBatchIncrementResponse) ProtoMessage() {} func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse { if m != nil { return m.Item } return nil } type MemcacheFlushRequest struct { Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} } func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) } func (*MemcacheFlushRequest) ProtoMessage() {} func (m *MemcacheFlushRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MemcacheFlushResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} } func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheFlushResponse) ProtoMessage() {} type MemcacheStatsRequest struct { Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} } func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) } func (*MemcacheStatsRequest) ProtoMessage() {} func (m *MemcacheStatsRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MergedNamespaceStats struct { Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"` Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"` ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"` Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"` Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"` OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} } func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) } func (*MergedNamespaceStats) ProtoMessage() {} func (m *MergedNamespaceStats) GetHits() uint64 { if m != nil && m.Hits != nil { return *m.Hits } return 0 } func (m *MergedNamespaceStats) GetMisses() uint64 { if m != nil && m.Misses != nil { return *m.Misses } return 0 } func (m *MergedNamespaceStats) GetByteHits() uint64 { if m != nil && m.ByteHits != nil { return *m.ByteHits } return 0 } func (m *MergedNamespaceStats) GetItems() uint64 { if m != nil && m.Items != nil { return *m.Items } return 0 } func (m *MergedNamespaceStats) GetBytes() uint64 { if m != nil && m.Bytes != nil { return *m.Bytes } 
return 0 } func (m *MergedNamespaceStats) GetOldestItemAge() uint32 { if m != nil && m.OldestItemAge != nil { return *m.OldestItemAge } return 0 } type MemcacheStatsResponse struct { Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} } func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheStatsResponse) ProtoMessage() {} func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats { if m != nil { return m.Stats } return nil } type MemcacheGrabTailRequest struct { ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"` NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} } func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) } func (*MemcacheGrabTailRequest) ProtoMessage() {} func (m *MemcacheGrabTailRequest) GetItemCount() int32 { if m != nil && m.ItemCount != nil { return *m.ItemCount } return 0 } func (m *MemcacheGrabTailRequest) GetNameSpace() string { if m != nil && m.NameSpace != nil { return *m.NameSpace } return "" } func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride { if m != nil { return m.Override } return nil } type MemcacheGrabTailResponse struct { Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} } func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) } func (*MemcacheGrabTailResponse) ProtoMessage() {} func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item { if m != nil { return m.Item } return nil } type MemcacheGrabTailResponse_Item struct { Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} } func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) } func (*MemcacheGrabTailResponse_Item) ProtoMessage() {} func (m *MemcacheGrabTailResponse_Item) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 { if m != nil && m.Flags != nil { return *m.Flags } return 0 } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto ================================================ syntax = "proto2"; option go_package = "memcache"; package appengine; message MemcacheServiceError { enum ErrorCode { OK = 0; UNSPECIFIED_ERROR = 1; NAMESPACE_NOT_SET = 2; PERMISSION_DENIED = 3; INVALID_VALUE = 6; } } message AppOverride { required string app_id = 1; optional int32 num_memcacheg_backends = 2 [deprecated=true]; optional bool ignore_shardlock = 3 [deprecated=true]; optional string memcache_pool_hint = 4 [deprecated=true]; optional bytes memcache_sharding_strategy = 5 [deprecated=true]; } message MemcacheGetRequest { repeated bytes key = 1; optional string name_space = 2 [default = ""]; 
optional bool for_cas = 4; optional AppOverride override = 5; } message MemcacheGetResponse { repeated group Item = 1 { required bytes key = 2; required bytes value = 3; optional fixed32 flags = 4; optional fixed64 cas_id = 5; optional int32 expires_in_seconds = 6; } } message MemcacheSetRequest { enum SetPolicy { SET = 1; ADD = 2; REPLACE = 3; CAS = 4; } repeated group Item = 1 { required bytes key = 2; required bytes value = 3; optional fixed32 flags = 4; optional SetPolicy set_policy = 5 [default = SET]; optional fixed32 expiration_time = 6 [default = 0]; optional fixed64 cas_id = 8; optional bool for_cas = 9; } optional string name_space = 7 [default = ""]; optional AppOverride override = 10; } message MemcacheSetResponse { enum SetStatusCode { STORED = 1; NOT_STORED = 2; ERROR = 3; EXISTS = 4; } repeated SetStatusCode set_status = 1; } message MemcacheDeleteRequest { repeated group Item = 1 { required bytes key = 2; optional fixed32 delete_time = 3 [default = 0]; } optional string name_space = 4 [default = ""]; optional AppOverride override = 5; } message MemcacheDeleteResponse { enum DeleteStatusCode { DELETED = 1; NOT_FOUND = 2; } repeated DeleteStatusCode delete_status = 1; } message MemcacheIncrementRequest { enum Direction { INCREMENT = 1; DECREMENT = 2; } required bytes key = 1; optional string name_space = 4 [default = ""]; optional uint64 delta = 2 [default = 1]; optional Direction direction = 3 [default = INCREMENT]; optional uint64 initial_value = 5; optional fixed32 initial_flags = 6; optional AppOverride override = 7; } message MemcacheIncrementResponse { enum IncrementStatusCode { OK = 1; NOT_CHANGED = 2; ERROR = 3; } optional uint64 new_value = 1; optional IncrementStatusCode increment_status = 2; } message MemcacheBatchIncrementRequest { optional string name_space = 1 [default = ""]; repeated MemcacheIncrementRequest item = 2; optional AppOverride override = 3; } message MemcacheBatchIncrementResponse { repeated MemcacheIncrementResponse item = 1; } message MemcacheFlushRequest { optional AppOverride override = 1; } message MemcacheFlushResponse { } message MemcacheStatsRequest { optional AppOverride override = 1; } message MergedNamespaceStats { required uint64 hits = 1; required uint64 misses = 2; required uint64 byte_hits = 3; required uint64 items = 4; required uint64 bytes = 5; required fixed32 oldest_item_age = 6; } message MemcacheStatsResponse { optional MergedNamespaceStats stats = 1; } message MemcacheGrabTailRequest { required int32 item_count = 1; optional string name_space = 2 [default = ""]; optional AppOverride override = 3; } message MemcacheGrabTailResponse { repeated group Item = 1 { required bytes value = 2; optional fixed32 flags = 3; } } ================================================ FILE: vendor/google.golang.org/appengine/internal/metadata.go ================================================ // Copyright 2014 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package internal // This file has code for accessing metadata. // // References: // https://cloud.google.com/compute/docs/metadata import ( "fmt" "io/ioutil" "log" "net/http" "net/url" ) const ( metadataHost = "metadata" metadataPath = "/computeMetadata/v1/" ) var ( metadataRequestHeaders = http.Header{ "X-Google-Metadata-Request": []string{"True"}, } ) // TODO(dsymonds): Do we need to support default values, like Python? 
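
// Editor's note: the short function below is an illustrative sketch and is not
// part of the vendored metadata.go; it only shows how the helpers defined in
// this file are meant to be called. The key is interpreted relative to
// /computeMetadata/v1/, and "project/project-id" is used here purely as an
// example of a well-known Compute Engine metadata path.
func exampleProjectID() (string, error) {
	b, err := getMetadata("project/project-id")
	if err != nil {
		return "", fmt.Errorf("metadata lookup for project id failed: %v", err)
	}
	return string(b), nil
}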
func mustGetMetadata(key string) []byte { b, err := getMetadata(key) if err != nil { log.Fatalf("Metadata fetch failed: %v", err) } return b } func getMetadata(key string) ([]byte, error) { // TODO(dsymonds): May need to use url.Parse to support keys with query args. req := &http.Request{ Method: "GET", URL: &url.URL{ Scheme: "http", Host: metadataHost, Path: metadataPath + key, }, Header: metadataRequestHeaders, Host: metadataHost, } resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) } return ioutil.ReadAll(resp.Body) } ================================================ FILE: vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/modules/modules_service.proto // DO NOT EDIT! /* Package modules is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/modules/modules_service.proto It has these top-level messages: ModulesServiceError GetModulesRequest GetModulesResponse GetVersionsRequest GetVersionsResponse GetDefaultVersionRequest GetDefaultVersionResponse GetNumInstancesRequest GetNumInstancesResponse SetNumInstancesRequest SetNumInstancesResponse StartModuleRequest StartModuleResponse StopModuleRequest StopModuleResponse GetHostnameRequest GetHostnameResponse */ package modules import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type ModulesServiceError_ErrorCode int32 const ( ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 ) var ModulesServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INVALID_MODULE", 2: "INVALID_VERSION", 3: "INVALID_INSTANCES", 4: "TRANSIENT_ERROR", 5: "UNEXPECTED_STATE", } var ModulesServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INVALID_MODULE": 1, "INVALID_VERSION": 2, "INVALID_INSTANCES": 3, "TRANSIENT_ERROR": 4, "UNEXPECTED_STATE": 5, } func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { p := new(ModulesServiceError_ErrorCode) *p = x return p } func (x ModulesServiceError_ErrorCode) String() string { return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) } func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") if err != nil { return err } *x = ModulesServiceError_ErrorCode(value) return nil } type ModulesServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } func (*ModulesServiceError) ProtoMessage() {} type GetModulesRequest struct { XXX_unrecognized []byte `json:"-"` } func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } func (m 
*GetModulesRequest) String() string { return proto.CompactTextString(m) } func (*GetModulesRequest) ProtoMessage() {} type GetModulesResponse struct { Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } func (*GetModulesResponse) ProtoMessage() {} func (m *GetModulesResponse) GetModule() []string { if m != nil { return m.Module } return nil } type GetVersionsRequest struct { Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } func (*GetVersionsRequest) ProtoMessage() {} func (m *GetVersionsRequest) GetModule() string { if m != nil && m.Module != nil { return *m.Module } return "" } type GetVersionsResponse struct { Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } func (*GetVersionsResponse) ProtoMessage() {} func (m *GetVersionsResponse) GetVersion() []string { if m != nil { return m.Version } return nil } type GetDefaultVersionRequest struct { Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } func (*GetDefaultVersionRequest) ProtoMessage() {} func (m *GetDefaultVersionRequest) GetModule() string { if m != nil && m.Module != nil { return *m.Module } return "" } type GetDefaultVersionResponse struct { Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } func (*GetDefaultVersionResponse) ProtoMessage() {} func (m *GetDefaultVersionResponse) GetVersion() string { if m != nil && m.Version != nil { return *m.Version } return "" } type GetNumInstancesRequest struct { Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } func (*GetNumInstancesRequest) ProtoMessage() {} func (m *GetNumInstancesRequest) GetModule() string { if m != nil && m.Module != nil { return *m.Module } return "" } func (m *GetNumInstancesRequest) GetVersion() string { if m != nil && m.Version != nil { return *m.Version } return "" } type GetNumInstancesResponse struct { Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } func (*GetNumInstancesResponse) ProtoMessage() {} func (m *GetNumInstancesResponse) 
GetInstances() int64 { if m != nil && m.Instances != nil { return *m.Instances } return 0 } type SetNumInstancesRequest struct { Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } func (*SetNumInstancesRequest) ProtoMessage() {} func (m *SetNumInstancesRequest) GetModule() string { if m != nil && m.Module != nil { return *m.Module } return "" } func (m *SetNumInstancesRequest) GetVersion() string { if m != nil && m.Version != nil { return *m.Version } return "" } func (m *SetNumInstancesRequest) GetInstances() int64 { if m != nil && m.Instances != nil { return *m.Instances } return 0 } type SetNumInstancesResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } func (*SetNumInstancesResponse) ProtoMessage() {} type StartModuleRequest struct { Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } func (*StartModuleRequest) ProtoMessage() {} func (m *StartModuleRequest) GetModule() string { if m != nil && m.Module != nil { return *m.Module } return "" } func (m *StartModuleRequest) GetVersion() string { if m != nil && m.Version != nil { return *m.Version } return "" } type StartModuleResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } func (*StartModuleResponse) ProtoMessage() {} type StopModuleRequest struct { Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } func (*StopModuleRequest) ProtoMessage() {} func (m *StopModuleRequest) GetModule() string { if m != nil && m.Module != nil { return *m.Module } return "" } func (m *StopModuleRequest) GetVersion() string { if m != nil && m.Version != nil { return *m.Version } return "" } type StopModuleResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } func (*StopModuleResponse) ProtoMessage() {} type GetHostnameRequest struct { Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } func (m *GetHostnameRequest) String() string { return 
proto.CompactTextString(m) }

func (*GetHostnameRequest) ProtoMessage() {}

func (m *GetHostnameRequest) GetModule() string {
	if m != nil && m.Module != nil {
		return *m.Module
	}
	return ""
}

func (m *GetHostnameRequest) GetVersion() string {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return ""
}

func (m *GetHostnameRequest) GetInstance() string {
	if m != nil && m.Instance != nil {
		return *m.Instance
	}
	return ""
}

type GetHostnameResponse struct {
	Hostname         *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *GetHostnameResponse) Reset()         { *m = GetHostnameResponse{} }
func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
func (*GetHostnameResponse) ProtoMessage()    {}

func (m *GetHostnameResponse) GetHostname() string {
	if m != nil && m.Hostname != nil {
		return *m.Hostname
	}
	return ""
}

func init() {
}

================================================
FILE: vendor/google.golang.org/appengine/internal/modules/modules_service.proto
================================================
syntax = "proto2";
option go_package = "modules";

package appengine;

message ModulesServiceError {
  enum ErrorCode {
    OK = 0;
    INVALID_MODULE = 1;
    INVALID_VERSION = 2;
    INVALID_INSTANCES = 3;
    TRANSIENT_ERROR = 4;
    UNEXPECTED_STATE = 5;
  }
}

message GetModulesRequest {
}

message GetModulesResponse {
  repeated string module = 1;
}

message GetVersionsRequest {
  optional string module = 1;
}

message GetVersionsResponse {
  repeated string version = 1;
}

message GetDefaultVersionRequest {
  optional string module = 1;
}

message GetDefaultVersionResponse {
  required string version = 1;
}

message GetNumInstancesRequest {
  optional string module = 1;
  optional string version = 2;
}

message GetNumInstancesResponse {
  required int64 instances = 1;
}

message SetNumInstancesRequest {
  optional string module = 1;
  optional string version = 2;
  required int64 instances = 3;
}

message SetNumInstancesResponse {}

message StartModuleRequest {
  required string module = 1;
  required string version = 2;
}

message StartModuleResponse {}

message StopModuleRequest {
  optional string module = 1;
  optional string version = 2;
}

message StopModuleResponse {}

message GetHostnameRequest {
  optional string module = 1;
  optional string version = 2;
  optional string instance = 3;
}

message GetHostnameResponse {
  required string hostname = 1;
}

================================================
FILE: vendor/google.golang.org/appengine/internal/net.go
================================================
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

// This file implements a network dialer that limits the number of concurrent connections.
// It is only used for API calls.

import (
	"log"
	"net"
	"runtime"
	"sync"
	"time"
)

var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.

func limitRelease() {
	// non-blocking
	select {
	case <-limitSem:
	default:
		// This should not normally happen.
		log.Print("appengine: unbalanced limitSem release!")
	}
}

func limitDial(network, addr string) (net.Conn, error) {
	limitSem <- 1

	// Dial with a timeout in case the API host is MIA.
	// The connection should normally be very fast.
	conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
	if err != nil {
		limitRelease()
		return nil, err
	}
	lc := &limitConn{Conn: conn}
	runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
	return lc, nil
}

type limitConn struct {
	close sync.Once
	net.Conn
}

func (lc *limitConn) Close() error {
	defer lc.close.Do(func() {
		limitRelease()
		runtime.SetFinalizer(lc, nil)
	})
	return lc.Conn.Close()
}

================================================
FILE: vendor/google.golang.org/appengine/internal/regen.sh
================================================
#!/bin/bash -e
#
# This script rebuilds the generated code for the protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.

PKG=google.golang.org/appengine

function die() {
	echo 1>&2 $*
	exit 1
}

# Sanity check that the right tools are accessible.
for tool in go protoc protoc-gen-go; do
	q=$(which $tool) || die "didn't find $tool"
	echo 1>&2 "$tool: $q"
done

echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base

# Run protoc once per package.
for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
	echo 1>&2 "* $dir"
	protoc --go_out=. $dir/*.proto
done

for f in $(find $PKG/internal -name '*.pb.go'); do
	# Remove proto.RegisterEnum calls.
	# These cause duplicate registration panics when these packages
	# are used on classic App Engine. proto.RegisterEnum only affects
	# parsing the text format; we don't care about that.
	# https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
	sed -i '/proto.RegisterEnum/d' $f
done

================================================
FILE: vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
================================================
// Code generated by protoc-gen-go.
// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
// DO NOT EDIT!

/*
Package remote_api is a generated protocol buffer package.

It is generated from these files:
	google.golang.org/appengine/internal/remote_api/remote_api.proto

It has these top-level messages:
	Request
	ApplicationError
	RpcError
	Response
*/
package remote_api

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
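
// Editor's note: the function below is an illustrative sketch added for this
// document, not part of the generated file. It demonstrates the proto2 usage
// pattern shared by all of these generated packages: optional/required scalar
// fields are pointers populated with helpers such as proto.String, messages
// round-trip through proto.Marshal/proto.Unmarshal, and reads go through the
// nil-safe Get* accessors, which return zero values when a field (or the
// message itself) is unset. The service name, method, and request id used
// here are placeholders, not values taken from this repository.
func exampleRequestRoundTrip() (string, error) {
	req := &Request{
		ServiceName: proto.String("memcache"),
		Method:      proto.String("Get"),
		Request:     []byte{}, // a serialized service-specific request would go here
		RequestId:   proto.String("example-request-id"),
	}
	buf, err := proto.Marshal(req)
	if err != nil {
		return "", err
	}
	var decoded Request
	if err := proto.Unmarshal(buf, &decoded); err != nil {
		return "", err
	}
	// The Get* accessors are safe to call even on unset fields.
	return decoded.GetServiceName() + "." + decoded.GetMethod(), nil
}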
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type RpcError_ErrorCode int32 const ( RpcError_UNKNOWN RpcError_ErrorCode = 0 RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 RpcError_PARSE_ERROR RpcError_ErrorCode = 2 RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 RpcError_OVER_QUOTA RpcError_ErrorCode = 4 RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 RpcError_BAD_REQUEST RpcError_ErrorCode = 8 RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 RpcError_CANCELLED RpcError_ErrorCode = 10 RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 ) var RpcError_ErrorCode_name = map[int32]string{ 0: "UNKNOWN", 1: "CALL_NOT_FOUND", 2: "PARSE_ERROR", 3: "SECURITY_VIOLATION", 4: "OVER_QUOTA", 5: "REQUEST_TOO_LARGE", 6: "CAPABILITY_DISABLED", 7: "FEATURE_DISABLED", 8: "BAD_REQUEST", 9: "RESPONSE_TOO_LARGE", 10: "CANCELLED", 11: "REPLAY_ERROR", 12: "DEADLINE_EXCEEDED", } var RpcError_ErrorCode_value = map[string]int32{ "UNKNOWN": 0, "CALL_NOT_FOUND": 1, "PARSE_ERROR": 2, "SECURITY_VIOLATION": 3, "OVER_QUOTA": 4, "REQUEST_TOO_LARGE": 5, "CAPABILITY_DISABLED": 6, "FEATURE_DISABLED": 7, "BAD_REQUEST": 8, "RESPONSE_TOO_LARGE": 9, "CANCELLED": 10, "REPLAY_ERROR": 11, "DEADLINE_EXCEEDED": 12, } func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { p := new(RpcError_ErrorCode) *p = x return p } func (x RpcError_ErrorCode) String() string { return proto.EnumName(RpcError_ErrorCode_name, int32(x)) } func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") if err != nil { return err } *x = RpcError_ErrorCode(value) return nil } type Request struct { ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (m *Request) GetServiceName() string { if m != nil && m.ServiceName != nil { return *m.ServiceName } return "" } func (m *Request) GetMethod() string { if m != nil && m.Method != nil { return *m.Method } return "" } func (m *Request) GetRequest() []byte { if m != nil { return m.Request } return nil } func (m *Request) GetRequestId() string { if m != nil && m.RequestId != nil { return *m.RequestId } return "" } type ApplicationError struct { Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ApplicationError) Reset() { *m = ApplicationError{} } func (m *ApplicationError) String() string { return proto.CompactTextString(m) } func (*ApplicationError) ProtoMessage() {} func (m *ApplicationError) GetCode() int32 { if m != nil && m.Code != nil { return *m.Code } return 0 } func (m *ApplicationError) GetDetail() string { if m != nil && m.Detail != nil { return *m.Detail } return "" } type RpcError struct { Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` 
XXX_unrecognized []byte `json:"-"` } func (m *RpcError) Reset() { *m = RpcError{} } func (m *RpcError) String() string { return proto.CompactTextString(m) } func (*RpcError) ProtoMessage() {} func (m *RpcError) GetCode() int32 { if m != nil && m.Code != nil { return *m.Code } return 0 } func (m *RpcError) GetDetail() string { if m != nil && m.Detail != nil { return *m.Detail } return "" } type Response struct { Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (m *Response) GetResponse() []byte { if m != nil { return m.Response } return nil } func (m *Response) GetException() []byte { if m != nil { return m.Exception } return nil } func (m *Response) GetApplicationError() *ApplicationError { if m != nil { return m.ApplicationError } return nil } func (m *Response) GetJavaException() []byte { if m != nil { return m.JavaException } return nil } func (m *Response) GetRpcError() *RpcError { if m != nil { return m.RpcError } return nil } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto ================================================ syntax = "proto2"; option go_package = "remote_api"; package remote_api; message Request { required string service_name = 2; required string method = 3; required bytes request = 4; optional string request_id = 5; } message ApplicationError { required int32 code = 1; required string detail = 2; } message RpcError { enum ErrorCode { UNKNOWN = 0; CALL_NOT_FOUND = 1; PARSE_ERROR = 2; SECURITY_VIOLATION = 3; OVER_QUOTA = 4; REQUEST_TOO_LARGE = 5; CAPABILITY_DISABLED = 6; FEATURE_DISABLED = 7; BAD_REQUEST = 8; RESPONSE_TOO_LARGE = 9; CANCELLED = 10; REPLAY_ERROR = 11; DEADLINE_EXCEEDED = 12; } required int32 code = 1; optional string detail = 2; } message Response { optional bytes response = 1; optional bytes exception = 2; optional ApplicationError application_error = 3; optional bytes java_exception = 4; optional RpcError rpc_error = 5; } ================================================ FILE: vendor/google.golang.org/appengine/internal/search/search.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/search/search.proto // DO NOT EDIT! /* Package search is a generated protocol buffer package. 
It is generated from these files: google.golang.org/appengine/internal/search/search.proto It has these top-level messages: Scope Entry AccessControlList FieldValue Field FieldTypes IndexShardSettings FacetValue Facet DocumentMetadata Document SearchServiceError RequestStatus IndexSpec IndexMetadata IndexDocumentParams IndexDocumentRequest IndexDocumentResponse DeleteDocumentParams DeleteDocumentRequest DeleteDocumentResponse ListDocumentsParams ListDocumentsRequest ListDocumentsResponse ListIndexesParams ListIndexesRequest ListIndexesResponse DeleteSchemaParams DeleteSchemaRequest DeleteSchemaResponse SortSpec ScorerSpec FieldSpec FacetRange FacetRequestParam FacetAutoDetectParam FacetRequest FacetRefinement SearchParams SearchRequest FacetResultValue FacetResult SearchResult SearchResponse */ package search import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type Scope_Type int32 const ( Scope_USER_BY_CANONICAL_ID Scope_Type = 1 Scope_USER_BY_EMAIL Scope_Type = 2 Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3 Scope_GROUP_BY_EMAIL Scope_Type = 4 Scope_GROUP_BY_DOMAIN Scope_Type = 5 Scope_ALL_USERS Scope_Type = 6 Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7 ) var Scope_Type_name = map[int32]string{ 1: "USER_BY_CANONICAL_ID", 2: "USER_BY_EMAIL", 3: "GROUP_BY_CANONICAL_ID", 4: "GROUP_BY_EMAIL", 5: "GROUP_BY_DOMAIN", 6: "ALL_USERS", 7: "ALL_AUTHENTICATED_USERS", } var Scope_Type_value = map[string]int32{ "USER_BY_CANONICAL_ID": 1, "USER_BY_EMAIL": 2, "GROUP_BY_CANONICAL_ID": 3, "GROUP_BY_EMAIL": 4, "GROUP_BY_DOMAIN": 5, "ALL_USERS": 6, "ALL_AUTHENTICATED_USERS": 7, } func (x Scope_Type) Enum() *Scope_Type { p := new(Scope_Type) *p = x return p } func (x Scope_Type) String() string { return proto.EnumName(Scope_Type_name, int32(x)) } func (x *Scope_Type) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type") if err != nil { return err } *x = Scope_Type(value) return nil } type Entry_Permission int32 const ( Entry_READ Entry_Permission = 1 Entry_WRITE Entry_Permission = 2 Entry_FULL_CONTROL Entry_Permission = 3 ) var Entry_Permission_name = map[int32]string{ 1: "READ", 2: "WRITE", 3: "FULL_CONTROL", } var Entry_Permission_value = map[string]int32{ "READ": 1, "WRITE": 2, "FULL_CONTROL": 3, } func (x Entry_Permission) Enum() *Entry_Permission { p := new(Entry_Permission) *p = x return p } func (x Entry_Permission) String() string { return proto.EnumName(Entry_Permission_name, int32(x)) } func (x *Entry_Permission) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission") if err != nil { return err } *x = Entry_Permission(value) return nil } type FieldValue_ContentType int32 const ( FieldValue_TEXT FieldValue_ContentType = 0 FieldValue_HTML FieldValue_ContentType = 1 FieldValue_ATOM FieldValue_ContentType = 2 FieldValue_DATE FieldValue_ContentType = 3 FieldValue_NUMBER FieldValue_ContentType = 4 FieldValue_GEO FieldValue_ContentType = 5 ) var FieldValue_ContentType_name = map[int32]string{ 0: "TEXT", 1: "HTML", 2: "ATOM", 3: "DATE", 4: "NUMBER", 5: "GEO", } var FieldValue_ContentType_value = map[string]int32{ "TEXT": 0, "HTML": 1, "ATOM": 2, "DATE": 3, "NUMBER": 4, "GEO": 5, } func (x FieldValue_ContentType) Enum() *FieldValue_ContentType { p := new(FieldValue_ContentType) *p = x return p } func (x FieldValue_ContentType) 
String() string { return proto.EnumName(FieldValue_ContentType_name, int32(x)) } func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType") if err != nil { return err } *x = FieldValue_ContentType(value) return nil } type FacetValue_ContentType int32 const ( FacetValue_ATOM FacetValue_ContentType = 2 FacetValue_NUMBER FacetValue_ContentType = 4 ) var FacetValue_ContentType_name = map[int32]string{ 2: "ATOM", 4: "NUMBER", } var FacetValue_ContentType_value = map[string]int32{ "ATOM": 2, "NUMBER": 4, } func (x FacetValue_ContentType) Enum() *FacetValue_ContentType { p := new(FacetValue_ContentType) *p = x return p } func (x FacetValue_ContentType) String() string { return proto.EnumName(FacetValue_ContentType_name, int32(x)) } func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType") if err != nil { return err } *x = FacetValue_ContentType(value) return nil } type Document_Storage int32 const ( Document_DISK Document_Storage = 0 ) var Document_Storage_name = map[int32]string{ 0: "DISK", } var Document_Storage_value = map[string]int32{ "DISK": 0, } func (x Document_Storage) Enum() *Document_Storage { p := new(Document_Storage) *p = x return p } func (x Document_Storage) String() string { return proto.EnumName(Document_Storage_name, int32(x)) } func (x *Document_Storage) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage") if err != nil { return err } *x = Document_Storage(value) return nil } type SearchServiceError_ErrorCode int32 const ( SearchServiceError_OK SearchServiceError_ErrorCode = 0 SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1 SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2 SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3 SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4 SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5 SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6 ) var SearchServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INVALID_REQUEST", 2: "TRANSIENT_ERROR", 3: "INTERNAL_ERROR", 4: "PERMISSION_DENIED", 5: "TIMEOUT", 6: "CONCURRENT_TRANSACTION", } var SearchServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INVALID_REQUEST": 1, "TRANSIENT_ERROR": 2, "INTERNAL_ERROR": 3, "PERMISSION_DENIED": 4, "TIMEOUT": 5, "CONCURRENT_TRANSACTION": 6, } func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode { p := new(SearchServiceError_ErrorCode) *p = x return p } func (x SearchServiceError_ErrorCode) String() string { return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x)) } func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode") if err != nil { return err } *x = SearchServiceError_ErrorCode(value) return nil } type IndexSpec_Consistency int32 const ( IndexSpec_GLOBAL IndexSpec_Consistency = 0 IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1 ) var IndexSpec_Consistency_name = map[int32]string{ 0: "GLOBAL", 1: "PER_DOCUMENT", } var IndexSpec_Consistency_value = map[string]int32{ "GLOBAL": 0, "PER_DOCUMENT": 1, } func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency { p := new(IndexSpec_Consistency) *p = x 
return p } func (x IndexSpec_Consistency) String() string { return proto.EnumName(IndexSpec_Consistency_name, int32(x)) } func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency") if err != nil { return err } *x = IndexSpec_Consistency(value) return nil } type IndexSpec_Source int32 const ( IndexSpec_SEARCH IndexSpec_Source = 0 IndexSpec_DATASTORE IndexSpec_Source = 1 IndexSpec_CLOUD_STORAGE IndexSpec_Source = 2 ) var IndexSpec_Source_name = map[int32]string{ 0: "SEARCH", 1: "DATASTORE", 2: "CLOUD_STORAGE", } var IndexSpec_Source_value = map[string]int32{ "SEARCH": 0, "DATASTORE": 1, "CLOUD_STORAGE": 2, } func (x IndexSpec_Source) Enum() *IndexSpec_Source { p := new(IndexSpec_Source) *p = x return p } func (x IndexSpec_Source) String() string { return proto.EnumName(IndexSpec_Source_name, int32(x)) } func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source") if err != nil { return err } *x = IndexSpec_Source(value) return nil } type IndexSpec_Mode int32 const ( IndexSpec_PRIORITY IndexSpec_Mode = 0 IndexSpec_BACKGROUND IndexSpec_Mode = 1 ) var IndexSpec_Mode_name = map[int32]string{ 0: "PRIORITY", 1: "BACKGROUND", } var IndexSpec_Mode_value = map[string]int32{ "PRIORITY": 0, "BACKGROUND": 1, } func (x IndexSpec_Mode) Enum() *IndexSpec_Mode { p := new(IndexSpec_Mode) *p = x return p } func (x IndexSpec_Mode) String() string { return proto.EnumName(IndexSpec_Mode_name, int32(x)) } func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode") if err != nil { return err } *x = IndexSpec_Mode(value) return nil } type IndexDocumentParams_Freshness int32 const ( IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0 IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1 ) var IndexDocumentParams_Freshness_name = map[int32]string{ 0: "SYNCHRONOUSLY", 1: "WHEN_CONVENIENT", } var IndexDocumentParams_Freshness_value = map[string]int32{ "SYNCHRONOUSLY": 0, "WHEN_CONVENIENT": 1, } func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness { p := new(IndexDocumentParams_Freshness) *p = x return p } func (x IndexDocumentParams_Freshness) String() string { return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x)) } func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness") if err != nil { return err } *x = IndexDocumentParams_Freshness(value) return nil } type ScorerSpec_Scorer int32 const ( ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0 ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2 ) var ScorerSpec_Scorer_name = map[int32]string{ 0: "RESCORING_MATCH_SCORER", 2: "MATCH_SCORER", } var ScorerSpec_Scorer_value = map[string]int32{ "RESCORING_MATCH_SCORER": 0, "MATCH_SCORER": 2, } func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer { p := new(ScorerSpec_Scorer) *p = x return p } func (x ScorerSpec_Scorer) String() string { return proto.EnumName(ScorerSpec_Scorer_name, int32(x)) } func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer") if err != nil { return err } *x = ScorerSpec_Scorer(value) return nil } type SearchParams_CursorType int32 const ( 
SearchParams_NONE SearchParams_CursorType = 0 SearchParams_SINGLE SearchParams_CursorType = 1 SearchParams_PER_RESULT SearchParams_CursorType = 2 ) var SearchParams_CursorType_name = map[int32]string{ 0: "NONE", 1: "SINGLE", 2: "PER_RESULT", } var SearchParams_CursorType_value = map[string]int32{ "NONE": 0, "SINGLE": 1, "PER_RESULT": 2, } func (x SearchParams_CursorType) Enum() *SearchParams_CursorType { p := new(SearchParams_CursorType) *p = x return p } func (x SearchParams_CursorType) String() string { return proto.EnumName(SearchParams_CursorType_name, int32(x)) } func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType") if err != nil { return err } *x = SearchParams_CursorType(value) return nil } type SearchParams_ParsingMode int32 const ( SearchParams_STRICT SearchParams_ParsingMode = 0 SearchParams_RELAXED SearchParams_ParsingMode = 1 ) var SearchParams_ParsingMode_name = map[int32]string{ 0: "STRICT", 1: "RELAXED", } var SearchParams_ParsingMode_value = map[string]int32{ "STRICT": 0, "RELAXED": 1, } func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode { p := new(SearchParams_ParsingMode) *p = x return p } func (x SearchParams_ParsingMode) String() string { return proto.EnumName(SearchParams_ParsingMode_name, int32(x)) } func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode") if err != nil { return err } *x = SearchParams_ParsingMode(value) return nil } type Scope struct { Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"` Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Scope) Reset() { *m = Scope{} } func (m *Scope) String() string { return proto.CompactTextString(m) } func (*Scope) ProtoMessage() {} func (m *Scope) GetType() Scope_Type { if m != nil && m.Type != nil { return *m.Type } return Scope_USER_BY_CANONICAL_ID } func (m *Scope) GetValue() string { if m != nil && m.Value != nil { return *m.Value } return "" } type Entry struct { Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"` Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"` DisplayName *string `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Entry) Reset() { *m = Entry{} } func (m *Entry) String() string { return proto.CompactTextString(m) } func (*Entry) ProtoMessage() {} func (m *Entry) GetScope() *Scope { if m != nil { return m.Scope } return nil } func (m *Entry) GetPermission() Entry_Permission { if m != nil && m.Permission != nil { return *m.Permission } return Entry_READ } func (m *Entry) GetDisplayName() string { if m != nil && m.DisplayName != nil { return *m.DisplayName } return "" } type AccessControlList struct { Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"` Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AccessControlList) Reset() { *m = AccessControlList{} } func (m *AccessControlList) String() string { return proto.CompactTextString(m) } func (*AccessControlList) ProtoMessage() {} func (m *AccessControlList) GetOwner() string { if m != nil && m.Owner != nil { return *m.Owner } 
return "" } func (m *AccessControlList) GetEntries() []*Entry { if m != nil { return m.Entries } return nil } type FieldValue struct { Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"` Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"` StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"` Geo *FieldValue_Geo `protobuf:"group,4,opt,name=Geo" json:"geo,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FieldValue) Reset() { *m = FieldValue{} } func (m *FieldValue) String() string { return proto.CompactTextString(m) } func (*FieldValue) ProtoMessage() {} const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT const Default_FieldValue_Language string = "en" func (m *FieldValue) GetType() FieldValue_ContentType { if m != nil && m.Type != nil { return *m.Type } return Default_FieldValue_Type } func (m *FieldValue) GetLanguage() string { if m != nil && m.Language != nil { return *m.Language } return Default_FieldValue_Language } func (m *FieldValue) GetStringValue() string { if m != nil && m.StringValue != nil { return *m.StringValue } return "" } func (m *FieldValue) GetGeo() *FieldValue_Geo { if m != nil { return m.Geo } return nil } type FieldValue_Geo struct { Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"` Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} } func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) } func (*FieldValue_Geo) ProtoMessage() {} func (m *FieldValue_Geo) GetLat() float64 { if m != nil && m.Lat != nil { return *m.Lat } return 0 } func (m *FieldValue_Geo) GetLng() float64 { if m != nil && m.Lng != nil { return *m.Lng } return 0 } type Field struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Field) Reset() { *m = Field{} } func (m *Field) String() string { return proto.CompactTextString(m) } func (*Field) ProtoMessage() {} func (m *Field) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *Field) GetValue() *FieldValue { if m != nil { return m.Value } return nil } type FieldTypes struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FieldTypes) Reset() { *m = FieldTypes{} } func (m *FieldTypes) String() string { return proto.CompactTextString(m) } func (*FieldTypes) ProtoMessage() {} func (m *FieldTypes) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *FieldTypes) GetType() []FieldValue_ContentType { if m != nil { return m.Type } return nil } type IndexShardSettings struct { PrevNumShards []int32 `protobuf:"varint,1,rep,name=prev_num_shards" json:"prev_num_shards,omitempty"` NumShards *int32 `protobuf:"varint,2,req,name=num_shards,def=1" json:"num_shards,omitempty"` PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false" json:"prev_num_shards_search_false,omitempty"` LocalReplica *string `protobuf:"bytes,4,opt,name=local_replica,def=" json:"local_replica,omitempty"` XXX_unrecognized []byte 
`json:"-"` } func (m *IndexShardSettings) Reset() { *m = IndexShardSettings{} } func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) } func (*IndexShardSettings) ProtoMessage() {} const Default_IndexShardSettings_NumShards int32 = 1 func (m *IndexShardSettings) GetPrevNumShards() []int32 { if m != nil { return m.PrevNumShards } return nil } func (m *IndexShardSettings) GetNumShards() int32 { if m != nil && m.NumShards != nil { return *m.NumShards } return Default_IndexShardSettings_NumShards } func (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 { if m != nil { return m.PrevNumShardsSearchFalse } return nil } func (m *IndexShardSettings) GetLocalReplica() string { if m != nil && m.LocalReplica != nil { return *m.LocalReplica } return "" } type FacetValue struct { Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"` StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetValue) Reset() { *m = FacetValue{} } func (m *FacetValue) String() string { return proto.CompactTextString(m) } func (*FacetValue) ProtoMessage() {} const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM func (m *FacetValue) GetType() FacetValue_ContentType { if m != nil && m.Type != nil { return *m.Type } return Default_FacetValue_Type } func (m *FacetValue) GetStringValue() string { if m != nil && m.StringValue != nil { return *m.StringValue } return "" } type Facet struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Facet) Reset() { *m = Facet{} } func (m *Facet) String() string { return proto.CompactTextString(m) } func (*Facet) ProtoMessage() {} func (m *Facet) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *Facet) GetValue() *FacetValue { if m != nil { return m.Value } return nil } type DocumentMetadata struct { Version *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version" json:"committed_st_version,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DocumentMetadata) Reset() { *m = DocumentMetadata{} } func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) } func (*DocumentMetadata) ProtoMessage() {} func (m *DocumentMetadata) GetVersion() int64 { if m != nil && m.Version != nil { return *m.Version } return 0 } func (m *DocumentMetadata) GetCommittedStVersion() int64 { if m != nil && m.CommittedStVersion != nil { return *m.CommittedStVersion } return 0 } type Document struct { Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"` Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"` OrderId *int32 `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"` Storage *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"` Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Document) Reset() { *m = Document{} } func (m *Document) String() string { return proto.CompactTextString(m) } func (*Document) ProtoMessage() {} const 
Default_Document_Language string = "en" const Default_Document_Storage Document_Storage = Document_DISK func (m *Document) GetId() string { if m != nil && m.Id != nil { return *m.Id } return "" } func (m *Document) GetLanguage() string { if m != nil && m.Language != nil { return *m.Language } return Default_Document_Language } func (m *Document) GetField() []*Field { if m != nil { return m.Field } return nil } func (m *Document) GetOrderId() int32 { if m != nil && m.OrderId != nil { return *m.OrderId } return 0 } func (m *Document) GetStorage() Document_Storage { if m != nil && m.Storage != nil { return *m.Storage } return Default_Document_Storage } func (m *Document) GetFacet() []*Facet { if m != nil { return m.Facet } return nil } type SearchServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *SearchServiceError) Reset() { *m = SearchServiceError{} } func (m *SearchServiceError) String() string { return proto.CompactTextString(m) } func (*SearchServiceError) ProtoMessage() {} type RequestStatus struct { Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"` ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"` CanonicalCode *int32 `protobuf:"varint,3,opt,name=canonical_code" json:"canonical_code,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *RequestStatus) Reset() { *m = RequestStatus{} } func (m *RequestStatus) String() string { return proto.CompactTextString(m) } func (*RequestStatus) ProtoMessage() {} func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode { if m != nil && m.Code != nil { return *m.Code } return SearchServiceError_OK } func (m *RequestStatus) GetErrorDetail() string { if m != nil && m.ErrorDetail != nil { return *m.ErrorDetail } return "" } func (m *RequestStatus) GetCanonicalCode() int32 { if m != nil && m.CanonicalCode != nil { return *m.CanonicalCode } return 0 } type IndexSpec struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"` Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"` Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexSpec) Reset() { *m = IndexSpec{} } func (m *IndexSpec) String() string { return proto.CompactTextString(m) } func (*IndexSpec) ProtoMessage() {} const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH const Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY func (m *IndexSpec) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *IndexSpec) GetConsistency() IndexSpec_Consistency { if m != nil && m.Consistency != nil { return *m.Consistency } return Default_IndexSpec_Consistency } func (m *IndexSpec) GetNamespace() string { if m != nil && m.Namespace != nil { return *m.Namespace } return "" } func (m *IndexSpec) GetVersion() int32 { if m != nil && m.Version != nil { return *m.Version } return 0 } func (m *IndexSpec) GetSource() IndexSpec_Source { if 
m != nil && m.Source != nil { return *m.Source } return Default_IndexSpec_Source } func (m *IndexSpec) GetMode() IndexSpec_Mode { if m != nil && m.Mode != nil { return *m.Mode } return Default_IndexSpec_Mode } type IndexMetadata struct { IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexMetadata) Reset() { *m = IndexMetadata{} } func (m *IndexMetadata) String() string { return proto.CompactTextString(m) } func (*IndexMetadata) ProtoMessage() {} func (m *IndexMetadata) GetIndexSpec() *IndexSpec { if m != nil { return m.IndexSpec } return nil } func (m *IndexMetadata) GetField() []*FieldTypes { if m != nil { return m.Field } return nil } func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage { if m != nil { return m.Storage } return nil } type IndexMetadata_Storage struct { AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"` Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} } func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) } func (*IndexMetadata_Storage) ProtoMessage() {} func (m *IndexMetadata_Storage) GetAmountUsed() int64 { if m != nil && m.AmountUsed != nil { return *m.AmountUsed } return 0 } func (m *IndexMetadata_Storage) GetLimit() int64 { if m != nil && m.Limit != nil { return *m.Limit } return 0 } type IndexDocumentParams struct { Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"` Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"` IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} } func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) } func (*IndexDocumentParams) ProtoMessage() {} const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY func (m *IndexDocumentParams) GetDocument() []*Document { if m != nil { return m.Document } return nil } func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness { if m != nil && m.Freshness != nil { return *m.Freshness } return Default_IndexDocumentParams_Freshness } func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec { if m != nil { return m.IndexSpec } return nil } type IndexDocumentRequest struct { Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } func (*IndexDocumentRequest) ProtoMessage() {} func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams { if m != nil { return m.Params } return nil } func (m *IndexDocumentRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type IndexDocumentResponse struct { Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" 
json:"status,omitempty"` DocId []string `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } func (*IndexDocumentResponse) ProtoMessage() {} func (m *IndexDocumentResponse) GetStatus() []*RequestStatus { if m != nil { return m.Status } return nil } func (m *IndexDocumentResponse) GetDocId() []string { if m != nil { return m.DocId } return nil } type DeleteDocumentParams struct { DocId []string `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"` IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} } func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentParams) ProtoMessage() {} func (m *DeleteDocumentParams) GetDocId() []string { if m != nil { return m.DocId } return nil } func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec { if m != nil { return m.IndexSpec } return nil } type DeleteDocumentRequest struct { Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentRequest) ProtoMessage() {} func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams { if m != nil { return m.Params } return nil } func (m *DeleteDocumentRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type DeleteDocumentResponse struct { Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentResponse) ProtoMessage() {} func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus { if m != nil { return m.Status } return nil } type ListDocumentsParams struct { IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"` IncludeStartDoc *bool `protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"` Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"` KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} } func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) } func (*ListDocumentsParams) ProtoMessage() {} const Default_ListDocumentsParams_IncludeStartDoc bool = true const Default_ListDocumentsParams_Limit int32 = 100 func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec { if m != nil { return m.IndexSpec } return nil } func (m *ListDocumentsParams) GetStartDocId() string { if m != nil && m.StartDocId != nil { return *m.StartDocId } return "" } func (m *ListDocumentsParams) GetIncludeStartDoc() bool { if m != nil && m.IncludeStartDoc != nil { return *m.IncludeStartDoc } return 
Default_ListDocumentsParams_IncludeStartDoc } func (m *ListDocumentsParams) GetLimit() int32 { if m != nil && m.Limit != nil { return *m.Limit } return Default_ListDocumentsParams_Limit } func (m *ListDocumentsParams) GetKeysOnly() bool { if m != nil && m.KeysOnly != nil { return *m.KeysOnly } return false } type ListDocumentsRequest struct { Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` AppId []byte `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} } func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) } func (*ListDocumentsRequest) ProtoMessage() {} func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams { if m != nil { return m.Params } return nil } func (m *ListDocumentsRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type ListDocumentsResponse struct { Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} } func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) } func (*ListDocumentsResponse) ProtoMessage() {} func (m *ListDocumentsResponse) GetStatus() *RequestStatus { if m != nil { return m.Status } return nil } func (m *ListDocumentsResponse) GetDocument() []*Document { if m != nil { return m.Document } return nil } type ListIndexesParams struct { FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"` Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"` Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"` IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"` IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"` Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"` Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} } func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) } func (*ListIndexesParams) ProtoMessage() {} const Default_ListIndexesParams_Limit int32 = 20 const Default_ListIndexesParams_IncludeStartIndex bool = true const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH func (m *ListIndexesParams) GetFetchSchema() bool { if m != nil && m.FetchSchema != nil { return *m.FetchSchema } return false } func (m *ListIndexesParams) GetLimit() int32 { if m != nil && m.Limit != nil { return *m.Limit } return Default_ListIndexesParams_Limit } func (m *ListIndexesParams) GetNamespace() string { if m != nil && m.Namespace != nil { return *m.Namespace } return "" } func (m *ListIndexesParams) GetStartIndexName() string { if m != nil && m.StartIndexName != nil { return *m.StartIndexName } return "" } func (m *ListIndexesParams) GetIncludeStartIndex() bool { if m != nil && m.IncludeStartIndex != nil { return *m.IncludeStartIndex } return Default_ListIndexesParams_IncludeStartIndex 
} func (m *ListIndexesParams) GetIndexNamePrefix() string { if m != nil && m.IndexNamePrefix != nil { return *m.IndexNamePrefix } return "" } func (m *ListIndexesParams) GetOffset() int32 { if m != nil && m.Offset != nil { return *m.Offset } return 0 } func (m *ListIndexesParams) GetSource() IndexSpec_Source { if m != nil && m.Source != nil { return *m.Source } return Default_ListIndexesParams_Source } type ListIndexesRequest struct { Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} } func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) } func (*ListIndexesRequest) ProtoMessage() {} func (m *ListIndexesRequest) GetParams() *ListIndexesParams { if m != nil { return m.Params } return nil } func (m *ListIndexesRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type ListIndexesResponse struct { Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} } func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) } func (*ListIndexesResponse) ProtoMessage() {} func (m *ListIndexesResponse) GetStatus() *RequestStatus { if m != nil { return m.Status } return nil } func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata { if m != nil { return m.IndexMetadata } return nil } type DeleteSchemaParams struct { Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"` IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} } func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) } func (*DeleteSchemaParams) ProtoMessage() {} const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH func (m *DeleteSchemaParams) GetSource() IndexSpec_Source { if m != nil && m.Source != nil { return *m.Source } return Default_DeleteSchemaParams_Source } func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec { if m != nil { return m.IndexSpec } return nil } type DeleteSchemaRequest struct { Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} } func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) } func (*DeleteSchemaRequest) ProtoMessage() {} func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams { if m != nil { return m.Params } return nil } func (m *DeleteSchemaRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type DeleteSchemaResponse struct { Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} } func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) } func (*DeleteSchemaResponse) ProtoMessage() {} func (m 
*DeleteSchemaResponse) GetStatus() []*RequestStatus { if m != nil { return m.Status } return nil } type SortSpec struct { SortExpression *string `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"` SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"` DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"` DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SortSpec) Reset() { *m = SortSpec{} } func (m *SortSpec) String() string { return proto.CompactTextString(m) } func (*SortSpec) ProtoMessage() {} const Default_SortSpec_SortDescending bool = true func (m *SortSpec) GetSortExpression() string { if m != nil && m.SortExpression != nil { return *m.SortExpression } return "" } func (m *SortSpec) GetSortDescending() bool { if m != nil && m.SortDescending != nil { return *m.SortDescending } return Default_SortSpec_SortDescending } func (m *SortSpec) GetDefaultValueText() string { if m != nil && m.DefaultValueText != nil { return *m.DefaultValueText } return "" } func (m *SortSpec) GetDefaultValueNumeric() float64 { if m != nil && m.DefaultValueNumeric != nil { return *m.DefaultValueNumeric } return 0 } type ScorerSpec struct { Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"` Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"` MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ScorerSpec) Reset() { *m = ScorerSpec{} } func (m *ScorerSpec) String() string { return proto.CompactTextString(m) } func (*ScorerSpec) ProtoMessage() {} const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER const Default_ScorerSpec_Limit int32 = 1000 func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer { if m != nil && m.Scorer != nil { return *m.Scorer } return Default_ScorerSpec_Scorer } func (m *ScorerSpec) GetLimit() int32 { if m != nil && m.Limit != nil { return *m.Limit } return Default_ScorerSpec_Limit } func (m *ScorerSpec) GetMatchScorerParameters() string { if m != nil && m.MatchScorerParameters != nil { return *m.MatchScorerParameters } return "" } type FieldSpec struct { Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` Expression []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression" json:"expression,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FieldSpec) Reset() { *m = FieldSpec{} } func (m *FieldSpec) String() string { return proto.CompactTextString(m) } func (*FieldSpec) ProtoMessage() {} func (m *FieldSpec) GetName() []string { if m != nil { return m.Name } return nil } func (m *FieldSpec) GetExpression() []*FieldSpec_Expression { if m != nil { return m.Expression } return nil } type FieldSpec_Expression struct { Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} } func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) } func (*FieldSpec_Expression) ProtoMessage() {} func (m *FieldSpec_Expression) GetName() string { if m != nil && m.Name != 
nil { return *m.Name } return "" } func (m *FieldSpec_Expression) GetExpression() string { if m != nil && m.Expression != nil { return *m.Expression } return "" } type FacetRange struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"` End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetRange) Reset() { *m = FacetRange{} } func (m *FacetRange) String() string { return proto.CompactTextString(m) } func (*FacetRange) ProtoMessage() {} func (m *FacetRange) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *FacetRange) GetStart() string { if m != nil && m.Start != nil { return *m.Start } return "" } func (m *FacetRange) GetEnd() string { if m != nil && m.End != nil { return *m.End } return "" } type FacetRequestParam struct { ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"` Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"` ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} } func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) } func (*FacetRequestParam) ProtoMessage() {} func (m *FacetRequestParam) GetValueLimit() int32 { if m != nil && m.ValueLimit != nil { return *m.ValueLimit } return 0 } func (m *FacetRequestParam) GetRange() []*FacetRange { if m != nil { return m.Range } return nil } func (m *FacetRequestParam) GetValueConstraint() []string { if m != nil { return m.ValueConstraint } return nil } type FacetAutoDetectParam struct { ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} } func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) } func (*FacetAutoDetectParam) ProtoMessage() {} const Default_FacetAutoDetectParam_ValueLimit int32 = 10 func (m *FacetAutoDetectParam) GetValueLimit() int32 { if m != nil && m.ValueLimit != nil { return *m.ValueLimit } return Default_FacetAutoDetectParam_ValueLimit } type FacetRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Params *FacetRequestParam `protobuf:"bytes,2,opt,name=params" json:"params,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetRequest) Reset() { *m = FacetRequest{} } func (m *FacetRequest) String() string { return proto.CompactTextString(m) } func (*FacetRequest) ProtoMessage() {} func (m *FacetRequest) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *FacetRequest) GetParams() *FacetRequestParam { if m != nil { return m.Params } return nil } type FacetRefinement struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` Range *FacetRefinement_Range `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetRefinement) Reset() { *m = FacetRefinement{} } func (m *FacetRefinement) String() string { return proto.CompactTextString(m) } func (*FacetRefinement) ProtoMessage() {} func (m *FacetRefinement) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m 
*FacetRefinement) GetValue() string { if m != nil && m.Value != nil { return *m.Value } return "" } func (m *FacetRefinement) GetRange() *FacetRefinement_Range { if m != nil { return m.Range } return nil } type FacetRefinement_Range struct { Start *string `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"` End *string `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetRefinement_Range) Reset() { *m = FacetRefinement_Range{} } func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) } func (*FacetRefinement_Range) ProtoMessage() {} func (m *FacetRefinement_Range) GetStart() string { if m != nil && m.Start != nil { return *m.Start } return "" } func (m *FacetRefinement_Range) GetEnd() string { if m != nil && m.End != nil { return *m.End } return "" } type SearchParams struct { IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"` Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"` Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"` CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"` Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"` MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"` SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"` ScorerSpec *ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"` FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"` KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"` ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"` AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"` IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"` FacetRefinement []*FacetRefinement `protobuf:"bytes,17,rep,name=facet_refinement" json:"facet_refinement,omitempty"` FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"` FacetDepth *int32 `protobuf:"varint,19,opt,name=facet_depth,def=1000" json:"facet_depth,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SearchParams) Reset() { *m = SearchParams{} } func (m *SearchParams) String() string { return proto.CompactTextString(m) } func (*SearchParams) ProtoMessage() {} const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE const Default_SearchParams_Limit int32 = 20 const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT const Default_SearchParams_AutoDiscoverFacetCount int32 = 0 const Default_SearchParams_FacetDepth int32 = 1000 func (m *SearchParams) GetIndexSpec() *IndexSpec { if m != nil { return m.IndexSpec } return nil } func (m *SearchParams) GetQuery() string { if m != nil && m.Query != nil { return *m.Query } return "" } func (m *SearchParams) GetCursor() string { if m != nil && m.Cursor != nil { return *m.Cursor } return "" } func (m *SearchParams) GetOffset() int32 { if m != nil && 
m.Offset != nil { return *m.Offset } return 0 } func (m *SearchParams) GetCursorType() SearchParams_CursorType { if m != nil && m.CursorType != nil { return *m.CursorType } return Default_SearchParams_CursorType } func (m *SearchParams) GetLimit() int32 { if m != nil && m.Limit != nil { return *m.Limit } return Default_SearchParams_Limit } func (m *SearchParams) GetMatchedCountAccuracy() int32 { if m != nil && m.MatchedCountAccuracy != nil { return *m.MatchedCountAccuracy } return 0 } func (m *SearchParams) GetSortSpec() []*SortSpec { if m != nil { return m.SortSpec } return nil } func (m *SearchParams) GetScorerSpec() *ScorerSpec { if m != nil { return m.ScorerSpec } return nil } func (m *SearchParams) GetFieldSpec() *FieldSpec { if m != nil { return m.FieldSpec } return nil } func (m *SearchParams) GetKeysOnly() bool { if m != nil && m.KeysOnly != nil { return *m.KeysOnly } return false } func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode { if m != nil && m.ParsingMode != nil { return *m.ParsingMode } return Default_SearchParams_ParsingMode } func (m *SearchParams) GetAutoDiscoverFacetCount() int32 { if m != nil && m.AutoDiscoverFacetCount != nil { return *m.AutoDiscoverFacetCount } return Default_SearchParams_AutoDiscoverFacetCount } func (m *SearchParams) GetIncludeFacet() []*FacetRequest { if m != nil { return m.IncludeFacet } return nil } func (m *SearchParams) GetFacetRefinement() []*FacetRefinement { if m != nil { return m.FacetRefinement } return nil } func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam { if m != nil { return m.FacetAutoDetectParam } return nil } func (m *SearchParams) GetFacetDepth() int32 { if m != nil && m.FacetDepth != nil { return *m.FacetDepth } return Default_SearchParams_FacetDepth } type SearchRequest struct { Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (m *SearchRequest) GetParams() *SearchParams { if m != nil { return m.Params } return nil } func (m *SearchRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type FacetResultValue struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"` Refinement *FacetRefinement `protobuf:"bytes,3,req,name=refinement" json:"refinement,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetResultValue) Reset() { *m = FacetResultValue{} } func (m *FacetResultValue) String() string { return proto.CompactTextString(m) } func (*FacetResultValue) ProtoMessage() {} func (m *FacetResultValue) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *FacetResultValue) GetCount() int32 { if m != nil && m.Count != nil { return *m.Count } return 0 } func (m *FacetResultValue) GetRefinement() *FacetRefinement { if m != nil { return m.Refinement } return nil } type FacetResult struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Value []*FacetResultValue `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *FacetResult) Reset() { *m = FacetResult{} } func (m *FacetResult) String() string { return proto.CompactTextString(m) } func (*FacetResult) 
ProtoMessage() {} func (m *FacetResult) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *FacetResult) GetValue() []*FacetResultValue { if m != nil { return m.Value } return nil } type SearchResult struct { Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"` Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"` Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"` Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SearchResult) Reset() { *m = SearchResult{} } func (m *SearchResult) String() string { return proto.CompactTextString(m) } func (*SearchResult) ProtoMessage() {} func (m *SearchResult) GetDocument() *Document { if m != nil { return m.Document } return nil } func (m *SearchResult) GetExpression() []*Field { if m != nil { return m.Expression } return nil } func (m *SearchResult) GetScore() []float64 { if m != nil { return m.Score } return nil } func (m *SearchResult) GetCursor() string { if m != nil && m.Cursor != nil { return *m.Cursor } return "" } type SearchResponse struct { Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"` MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"` Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"` Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"` FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} var extRange_SearchResponse = []proto.ExtensionRange{ {1000, 9999}, } func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange { return extRange_SearchResponse } func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension { if m.XXX_extensions == nil { m.XXX_extensions = make(map[int32]proto.Extension) } return m.XXX_extensions } func (m *SearchResponse) GetResult() []*SearchResult { if m != nil { return m.Result } return nil } func (m *SearchResponse) GetMatchedCount() int64 { if m != nil && m.MatchedCount != nil { return *m.MatchedCount } return 0 } func (m *SearchResponse) GetStatus() *RequestStatus { if m != nil { return m.Status } return nil } func (m *SearchResponse) GetCursor() string { if m != nil && m.Cursor != nil { return *m.Cursor } return "" } func (m *SearchResponse) GetFacetResult() []*FacetResult { if m != nil { return m.FacetResult } return nil } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/search/search.proto ================================================ syntax = "proto2"; option go_package = "search"; package search; message Scope { enum Type { USER_BY_CANONICAL_ID = 1; USER_BY_EMAIL = 2; GROUP_BY_CANONICAL_ID = 3; GROUP_BY_EMAIL = 4; GROUP_BY_DOMAIN = 5; ALL_USERS = 6; ALL_AUTHENTICATED_USERS = 7; } optional Type type = 1; optional string value = 2; } message Entry { enum Permission { READ = 1; WRITE = 2; FULL_CONTROL = 3; } optional Scope scope = 1; optional Permission permission = 2; optional string display_name = 3; } message AccessControlList { optional string owner = 1; repeated Entry entries = 2; } 
message FieldValue { enum ContentType { TEXT = 0; HTML = 1; ATOM = 2; DATE = 3; NUMBER = 4; GEO = 5; } optional ContentType type = 1 [default = TEXT]; optional string language = 2 [default = "en"]; optional string string_value = 3; optional group Geo = 4 { required double lat = 5; required double lng = 6; } } message Field { required string name = 1; required FieldValue value = 2; } message FieldTypes { required string name = 1; repeated FieldValue.ContentType type = 2; } message IndexShardSettings { repeated int32 prev_num_shards = 1; required int32 num_shards = 2 [default=1]; repeated int32 prev_num_shards_search_false = 3; optional string local_replica = 4 [default = ""]; } message FacetValue { enum ContentType { ATOM = 2; NUMBER = 4; } optional ContentType type = 1 [default = ATOM]; optional string string_value = 3; } message Facet { required string name = 1; required FacetValue value = 2; } message DocumentMetadata { optional int64 version = 1; optional int64 committed_st_version = 2; } message Document { optional string id = 1; optional string language = 2 [default = "en"]; repeated Field field = 3; optional int32 order_id = 4; enum Storage { DISK = 0; } optional Storage storage = 5 [default = DISK]; repeated Facet facet = 8; } message SearchServiceError { enum ErrorCode { OK = 0; INVALID_REQUEST = 1; TRANSIENT_ERROR = 2; INTERNAL_ERROR = 3; PERMISSION_DENIED = 4; TIMEOUT = 5; CONCURRENT_TRANSACTION = 6; } } message RequestStatus { required SearchServiceError.ErrorCode code = 1; optional string error_detail = 2; optional int32 canonical_code = 3; } message IndexSpec { required string name = 1; enum Consistency { GLOBAL = 0; PER_DOCUMENT = 1; } optional Consistency consistency = 2 [default = PER_DOCUMENT]; optional string namespace = 3; optional int32 version = 4; enum Source { SEARCH = 0; DATASTORE = 1; CLOUD_STORAGE = 2; } optional Source source = 5 [default = SEARCH]; enum Mode { PRIORITY = 0; BACKGROUND = 1; } optional Mode mode = 6 [default = PRIORITY]; } message IndexMetadata { required IndexSpec index_spec = 1; repeated FieldTypes field = 2; message Storage { optional int64 amount_used = 1; optional int64 limit = 2; } optional Storage storage = 3; } message IndexDocumentParams { repeated Document document = 1; enum Freshness { SYNCHRONOUSLY = 0; WHEN_CONVENIENT = 1; } optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true]; required IndexSpec index_spec = 3; } message IndexDocumentRequest { required IndexDocumentParams params = 1; optional bytes app_id = 3; } message IndexDocumentResponse { repeated RequestStatus status = 1; repeated string doc_id = 2; } message DeleteDocumentParams { repeated string doc_id = 1; required IndexSpec index_spec = 2; } message DeleteDocumentRequest { required DeleteDocumentParams params = 1; optional bytes app_id = 3; } message DeleteDocumentResponse { repeated RequestStatus status = 1; } message ListDocumentsParams { required IndexSpec index_spec = 1; optional string start_doc_id = 2; optional bool include_start_doc = 3 [default = true]; optional int32 limit = 4 [default = 100]; optional bool keys_only = 5; } message ListDocumentsRequest { required ListDocumentsParams params = 1; optional bytes app_id = 2; } message ListDocumentsResponse { required RequestStatus status = 1; repeated Document document = 2; } message ListIndexesParams { optional bool fetch_schema = 1; optional int32 limit = 2 [default = 20]; optional string namespace = 3; optional string start_index_name = 4; optional bool include_start_index = 5 [default = 
true]; optional string index_name_prefix = 6; optional int32 offset = 7; optional IndexSpec.Source source = 8 [default = SEARCH]; } message ListIndexesRequest { required ListIndexesParams params = 1; optional bytes app_id = 3; } message ListIndexesResponse { required RequestStatus status = 1; repeated IndexMetadata index_metadata = 2; } message DeleteSchemaParams { optional IndexSpec.Source source = 1 [default = SEARCH]; repeated IndexSpec index_spec = 2; } message DeleteSchemaRequest { required DeleteSchemaParams params = 1; optional bytes app_id = 3; } message DeleteSchemaResponse { repeated RequestStatus status = 1; } message SortSpec { required string sort_expression = 1; optional bool sort_descending = 2 [default = true]; optional string default_value_text = 4; optional double default_value_numeric = 5; } message ScorerSpec { enum Scorer { RESCORING_MATCH_SCORER = 0; MATCH_SCORER = 2; } optional Scorer scorer = 1 [default = MATCH_SCORER]; optional int32 limit = 2 [default = 1000]; optional string match_scorer_parameters = 9; } message FieldSpec { repeated string name = 1; repeated group Expression = 2 { required string name = 3; required string expression = 4; } } message FacetRange { optional string name = 1; optional string start = 2; optional string end = 3; } message FacetRequestParam { optional int32 value_limit = 1; repeated FacetRange range = 2; repeated string value_constraint = 3; } message FacetAutoDetectParam { optional int32 value_limit = 1 [default = 10]; } message FacetRequest { required string name = 1; optional FacetRequestParam params = 2; } message FacetRefinement { required string name = 1; optional string value = 2; message Range { optional string start = 1; optional string end = 2; } optional Range range = 3; } message SearchParams { required IndexSpec index_spec = 1; required string query = 2; optional string cursor = 4; optional int32 offset = 11; enum CursorType { NONE = 0; SINGLE = 1; PER_RESULT = 2; } optional CursorType cursor_type = 5 [default = NONE]; optional int32 limit = 6 [default = 20]; optional int32 matched_count_accuracy = 7; repeated SortSpec sort_spec = 8; optional ScorerSpec scorer_spec = 9; optional FieldSpec field_spec = 10; optional bool keys_only = 12; enum ParsingMode { STRICT = 0; RELAXED = 1; } optional ParsingMode parsing_mode = 13 [default = STRICT]; optional int32 auto_discover_facet_count = 15 [default = 0]; repeated FacetRequest include_facet = 16; repeated FacetRefinement facet_refinement = 17; optional FacetAutoDetectParam facet_auto_detect_param = 18; optional int32 facet_depth = 19 [default=1000]; } message SearchRequest { required SearchParams params = 1; optional bytes app_id = 3; } message FacetResultValue { required string name = 1; required int32 count = 2; required FacetRefinement refinement = 3; } message FacetResult { required string name = 1; repeated FacetResultValue value = 2; } message SearchResult { required Document document = 1; repeated Field expression = 4; repeated double score = 2; optional string cursor = 3; } message SearchResponse { repeated SearchResult result = 1; required int64 matched_count = 2; required RequestStatus status = 3; optional string cursor = 4; repeated FacetResult facet_result = 5; extensions 1000 to 9999; } ================================================ FILE: vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go ================================================ // Code generated by protoc-gen-go. 
// source: google.golang.org/appengine/internal/socket/socket_service.proto // DO NOT EDIT! /* Package socket is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/socket/socket_service.proto It has these top-level messages: RemoteSocketServiceError AddressPort CreateSocketRequest CreateSocketReply BindRequest BindReply GetSocketNameRequest GetSocketNameReply GetPeerNameRequest GetPeerNameReply SocketOption SetSocketOptionsRequest SetSocketOptionsReply GetSocketOptionsRequest GetSocketOptionsReply ConnectRequest ConnectReply ListenRequest ListenReply AcceptRequest AcceptReply ShutDownRequest ShutDownReply CloseRequest CloseReply SendRequest SendReply ReceiveRequest ReceiveReply PollEvent PollRequest PollReply ResolveRequest ResolveReply */ package socket import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type RemoteSocketServiceError_ErrorCode int32 const ( RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 ) var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ 1: "SYSTEM_ERROR", 2: "GAI_ERROR", 4: "FAILURE", 5: "PERMISSION_DENIED", 6: "INVALID_REQUEST", 7: "SOCKET_CLOSED", } var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ "SYSTEM_ERROR": 1, "GAI_ERROR": 2, "FAILURE": 4, "PERMISSION_DENIED": 5, "INVALID_REQUEST": 6, "SOCKET_CLOSED": 7, } func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { p := new(RemoteSocketServiceError_ErrorCode) *p = x return p } func (x RemoteSocketServiceError_ErrorCode) String() string { return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) } func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") if err != nil { return err } *x = RemoteSocketServiceError_ErrorCode(value) return nil } type RemoteSocketServiceError_SystemError int32 const ( RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 
11 RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 
RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 
RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 ) var RemoteSocketServiceError_SystemError_name = map[int32]string{ 0: "SYS_SUCCESS", 1: "SYS_EPERM", 2: "SYS_ENOENT", 3: "SYS_ESRCH", 4: "SYS_EINTR", 5: "SYS_EIO", 6: "SYS_ENXIO", 7: "SYS_E2BIG", 8: "SYS_ENOEXEC", 9: "SYS_EBADF", 10: "SYS_ECHILD", 11: "SYS_EAGAIN", // Duplicate value: 11: "SYS_EWOULDBLOCK", 12: "SYS_ENOMEM", 13: "SYS_EACCES", 14: "SYS_EFAULT", 15: "SYS_ENOTBLK", 16: "SYS_EBUSY", 17: "SYS_EEXIST", 18: "SYS_EXDEV", 19: "SYS_ENODEV", 20: "SYS_ENOTDIR", 21: "SYS_EISDIR", 22: "SYS_EINVAL", 23: "SYS_ENFILE", 24: "SYS_EMFILE", 25: "SYS_ENOTTY", 26: "SYS_ETXTBSY", 27: "SYS_EFBIG", 28: "SYS_ENOSPC", 29: "SYS_ESPIPE", 30: "SYS_EROFS", 31: "SYS_EMLINK", 32: "SYS_EPIPE", 33: "SYS_EDOM", 34: "SYS_ERANGE", 35: "SYS_EDEADLK", // Duplicate value: 35: "SYS_EDEADLOCK", 36: "SYS_ENAMETOOLONG", 37: "SYS_ENOLCK", 38: "SYS_ENOSYS", 39: "SYS_ENOTEMPTY", 40: "SYS_ELOOP", 42: "SYS_ENOMSG", 43: "SYS_EIDRM", 44: 
"SYS_ECHRNG", 45: "SYS_EL2NSYNC", 46: "SYS_EL3HLT", 47: "SYS_EL3RST", 48: "SYS_ELNRNG", 49: "SYS_EUNATCH", 50: "SYS_ENOCSI", 51: "SYS_EL2HLT", 52: "SYS_EBADE", 53: "SYS_EBADR", 54: "SYS_EXFULL", 55: "SYS_ENOANO", 56: "SYS_EBADRQC", 57: "SYS_EBADSLT", 59: "SYS_EBFONT", 60: "SYS_ENOSTR", 61: "SYS_ENODATA", 62: "SYS_ETIME", 63: "SYS_ENOSR", 64: "SYS_ENONET", 65: "SYS_ENOPKG", 66: "SYS_EREMOTE", 67: "SYS_ENOLINK", 68: "SYS_EADV", 69: "SYS_ESRMNT", 70: "SYS_ECOMM", 71: "SYS_EPROTO", 72: "SYS_EMULTIHOP", 73: "SYS_EDOTDOT", 74: "SYS_EBADMSG", 75: "SYS_EOVERFLOW", 76: "SYS_ENOTUNIQ", 77: "SYS_EBADFD", 78: "SYS_EREMCHG", 79: "SYS_ELIBACC", 80: "SYS_ELIBBAD", 81: "SYS_ELIBSCN", 82: "SYS_ELIBMAX", 83: "SYS_ELIBEXEC", 84: "SYS_EILSEQ", 85: "SYS_ERESTART", 86: "SYS_ESTRPIPE", 87: "SYS_EUSERS", 88: "SYS_ENOTSOCK", 89: "SYS_EDESTADDRREQ", 90: "SYS_EMSGSIZE", 91: "SYS_EPROTOTYPE", 92: "SYS_ENOPROTOOPT", 93: "SYS_EPROTONOSUPPORT", 94: "SYS_ESOCKTNOSUPPORT", 95: "SYS_EOPNOTSUPP", // Duplicate value: 95: "SYS_ENOTSUP", 96: "SYS_EPFNOSUPPORT", 97: "SYS_EAFNOSUPPORT", 98: "SYS_EADDRINUSE", 99: "SYS_EADDRNOTAVAIL", 100: "SYS_ENETDOWN", 101: "SYS_ENETUNREACH", 102: "SYS_ENETRESET", 103: "SYS_ECONNABORTED", 104: "SYS_ECONNRESET", 105: "SYS_ENOBUFS", 106: "SYS_EISCONN", 107: "SYS_ENOTCONN", 108: "SYS_ESHUTDOWN", 109: "SYS_ETOOMANYREFS", 110: "SYS_ETIMEDOUT", 111: "SYS_ECONNREFUSED", 112: "SYS_EHOSTDOWN", 113: "SYS_EHOSTUNREACH", 114: "SYS_EALREADY", 115: "SYS_EINPROGRESS", 116: "SYS_ESTALE", 117: "SYS_EUCLEAN", 118: "SYS_ENOTNAM", 119: "SYS_ENAVAIL", 120: "SYS_EISNAM", 121: "SYS_EREMOTEIO", 122: "SYS_EDQUOT", 123: "SYS_ENOMEDIUM", 124: "SYS_EMEDIUMTYPE", 125: "SYS_ECANCELED", 126: "SYS_ENOKEY", 127: "SYS_EKEYEXPIRED", 128: "SYS_EKEYREVOKED", 129: "SYS_EKEYREJECTED", 130: "SYS_EOWNERDEAD", 131: "SYS_ENOTRECOVERABLE", 132: "SYS_ERFKILL", } var RemoteSocketServiceError_SystemError_value = map[string]int32{ "SYS_SUCCESS": 0, "SYS_EPERM": 1, "SYS_ENOENT": 2, "SYS_ESRCH": 3, "SYS_EINTR": 4, "SYS_EIO": 5, "SYS_ENXIO": 6, "SYS_E2BIG": 7, "SYS_ENOEXEC": 8, "SYS_EBADF": 9, "SYS_ECHILD": 10, "SYS_EAGAIN": 11, "SYS_EWOULDBLOCK": 11, "SYS_ENOMEM": 12, "SYS_EACCES": 13, "SYS_EFAULT": 14, "SYS_ENOTBLK": 15, "SYS_EBUSY": 16, "SYS_EEXIST": 17, "SYS_EXDEV": 18, "SYS_ENODEV": 19, "SYS_ENOTDIR": 20, "SYS_EISDIR": 21, "SYS_EINVAL": 22, "SYS_ENFILE": 23, "SYS_EMFILE": 24, "SYS_ENOTTY": 25, "SYS_ETXTBSY": 26, "SYS_EFBIG": 27, "SYS_ENOSPC": 28, "SYS_ESPIPE": 29, "SYS_EROFS": 30, "SYS_EMLINK": 31, "SYS_EPIPE": 32, "SYS_EDOM": 33, "SYS_ERANGE": 34, "SYS_EDEADLK": 35, "SYS_EDEADLOCK": 35, "SYS_ENAMETOOLONG": 36, "SYS_ENOLCK": 37, "SYS_ENOSYS": 38, "SYS_ENOTEMPTY": 39, "SYS_ELOOP": 40, "SYS_ENOMSG": 42, "SYS_EIDRM": 43, "SYS_ECHRNG": 44, "SYS_EL2NSYNC": 45, "SYS_EL3HLT": 46, "SYS_EL3RST": 47, "SYS_ELNRNG": 48, "SYS_EUNATCH": 49, "SYS_ENOCSI": 50, "SYS_EL2HLT": 51, "SYS_EBADE": 52, "SYS_EBADR": 53, "SYS_EXFULL": 54, "SYS_ENOANO": 55, "SYS_EBADRQC": 56, "SYS_EBADSLT": 57, "SYS_EBFONT": 59, "SYS_ENOSTR": 60, "SYS_ENODATA": 61, "SYS_ETIME": 62, "SYS_ENOSR": 63, "SYS_ENONET": 64, "SYS_ENOPKG": 65, "SYS_EREMOTE": 66, "SYS_ENOLINK": 67, "SYS_EADV": 68, "SYS_ESRMNT": 69, "SYS_ECOMM": 70, "SYS_EPROTO": 71, "SYS_EMULTIHOP": 72, "SYS_EDOTDOT": 73, "SYS_EBADMSG": 74, "SYS_EOVERFLOW": 75, "SYS_ENOTUNIQ": 76, "SYS_EBADFD": 77, "SYS_EREMCHG": 78, "SYS_ELIBACC": 79, "SYS_ELIBBAD": 80, "SYS_ELIBSCN": 81, "SYS_ELIBMAX": 82, "SYS_ELIBEXEC": 83, "SYS_EILSEQ": 84, "SYS_ERESTART": 85, "SYS_ESTRPIPE": 86, "SYS_EUSERS": 87, "SYS_ENOTSOCK": 88, "SYS_EDESTADDRREQ": 
89, "SYS_EMSGSIZE": 90, "SYS_EPROTOTYPE": 91, "SYS_ENOPROTOOPT": 92, "SYS_EPROTONOSUPPORT": 93, "SYS_ESOCKTNOSUPPORT": 94, "SYS_EOPNOTSUPP": 95, "SYS_ENOTSUP": 95, "SYS_EPFNOSUPPORT": 96, "SYS_EAFNOSUPPORT": 97, "SYS_EADDRINUSE": 98, "SYS_EADDRNOTAVAIL": 99, "SYS_ENETDOWN": 100, "SYS_ENETUNREACH": 101, "SYS_ENETRESET": 102, "SYS_ECONNABORTED": 103, "SYS_ECONNRESET": 104, "SYS_ENOBUFS": 105, "SYS_EISCONN": 106, "SYS_ENOTCONN": 107, "SYS_ESHUTDOWN": 108, "SYS_ETOOMANYREFS": 109, "SYS_ETIMEDOUT": 110, "SYS_ECONNREFUSED": 111, "SYS_EHOSTDOWN": 112, "SYS_EHOSTUNREACH": 113, "SYS_EALREADY": 114, "SYS_EINPROGRESS": 115, "SYS_ESTALE": 116, "SYS_EUCLEAN": 117, "SYS_ENOTNAM": 118, "SYS_ENAVAIL": 119, "SYS_EISNAM": 120, "SYS_EREMOTEIO": 121, "SYS_EDQUOT": 122, "SYS_ENOMEDIUM": 123, "SYS_EMEDIUMTYPE": 124, "SYS_ECANCELED": 125, "SYS_ENOKEY": 126, "SYS_EKEYEXPIRED": 127, "SYS_EKEYREVOKED": 128, "SYS_EKEYREJECTED": 129, "SYS_EOWNERDEAD": 130, "SYS_ENOTRECOVERABLE": 131, "SYS_ERFKILL": 132, } func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { p := new(RemoteSocketServiceError_SystemError) *p = x return p } func (x RemoteSocketServiceError_SystemError) String() string { return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) } func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") if err != nil { return err } *x = RemoteSocketServiceError_SystemError(value) return nil } type CreateSocketRequest_SocketFamily int32 const ( CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 ) var CreateSocketRequest_SocketFamily_name = map[int32]string{ 1: "IPv4", 2: "IPv6", } var CreateSocketRequest_SocketFamily_value = map[string]int32{ "IPv4": 1, "IPv6": 2, } func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { p := new(CreateSocketRequest_SocketFamily) *p = x return p } func (x CreateSocketRequest_SocketFamily) String() string { return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) } func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") if err != nil { return err } *x = CreateSocketRequest_SocketFamily(value) return nil } type CreateSocketRequest_SocketProtocol int32 const ( CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 ) var CreateSocketRequest_SocketProtocol_name = map[int32]string{ 1: "TCP", 2: "UDP", } var CreateSocketRequest_SocketProtocol_value = map[string]int32{ "TCP": 1, "UDP": 2, } func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { p := new(CreateSocketRequest_SocketProtocol) *p = x return p } func (x CreateSocketRequest_SocketProtocol) String() string { return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) } func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") if err != nil { return err } *x = CreateSocketRequest_SocketProtocol(value) return nil } type SocketOption_SocketOptionLevel int32 const ( SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 
0 SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 ) var SocketOption_SocketOptionLevel_name = map[int32]string{ 0: "SOCKET_SOL_IP", 1: "SOCKET_SOL_SOCKET", 6: "SOCKET_SOL_TCP", 17: "SOCKET_SOL_UDP", } var SocketOption_SocketOptionLevel_value = map[string]int32{ "SOCKET_SOL_IP": 0, "SOCKET_SOL_SOCKET": 1, "SOCKET_SOL_TCP": 6, "SOCKET_SOL_UDP": 17, } func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { p := new(SocketOption_SocketOptionLevel) *p = x return p } func (x SocketOption_SocketOptionLevel) String() string { return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) } func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") if err != nil { return err } *x = SocketOption_SocketOptionLevel(value) return nil } type SocketOption_SocketOptionName int32 const ( SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 ) var SocketOption_SocketOptionName_name = map[int32]string{ 1: "SOCKET_SO_DEBUG", 2: "SOCKET_SO_REUSEADDR", 3: "SOCKET_SO_TYPE", 4: "SOCKET_SO_ERROR", 5: "SOCKET_SO_DONTROUTE", 6: "SOCKET_SO_BROADCAST", 7: "SOCKET_SO_SNDBUF", 8: "SOCKET_SO_RCVBUF", 9: "SOCKET_SO_KEEPALIVE", 10: "SOCKET_SO_OOBINLINE", 13: "SOCKET_SO_LINGER", 20: "SOCKET_SO_RCVTIMEO", 21: "SOCKET_SO_SNDTIMEO", // Duplicate value: 1: "SOCKET_IP_TOS", // Duplicate value: 2: "SOCKET_IP_TTL", // Duplicate value: 3: "SOCKET_IP_HDRINCL", // Duplicate value: 4: "SOCKET_IP_OPTIONS", // Duplicate 
value: 1: "SOCKET_TCP_NODELAY", // Duplicate value: 2: "SOCKET_TCP_MAXSEG", // Duplicate value: 3: "SOCKET_TCP_CORK", // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", // Duplicate value: 7: "SOCKET_TCP_SYNCNT", // Duplicate value: 8: "SOCKET_TCP_LINGER2", // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", 11: "SOCKET_TCP_INFO", 12: "SOCKET_TCP_QUICKACK", } var SocketOption_SocketOptionName_value = map[string]int32{ "SOCKET_SO_DEBUG": 1, "SOCKET_SO_REUSEADDR": 2, "SOCKET_SO_TYPE": 3, "SOCKET_SO_ERROR": 4, "SOCKET_SO_DONTROUTE": 5, "SOCKET_SO_BROADCAST": 6, "SOCKET_SO_SNDBUF": 7, "SOCKET_SO_RCVBUF": 8, "SOCKET_SO_KEEPALIVE": 9, "SOCKET_SO_OOBINLINE": 10, "SOCKET_SO_LINGER": 13, "SOCKET_SO_RCVTIMEO": 20, "SOCKET_SO_SNDTIMEO": 21, "SOCKET_IP_TOS": 1, "SOCKET_IP_TTL": 2, "SOCKET_IP_HDRINCL": 3, "SOCKET_IP_OPTIONS": 4, "SOCKET_TCP_NODELAY": 1, "SOCKET_TCP_MAXSEG": 2, "SOCKET_TCP_CORK": 3, "SOCKET_TCP_KEEPIDLE": 4, "SOCKET_TCP_KEEPINTVL": 5, "SOCKET_TCP_KEEPCNT": 6, "SOCKET_TCP_SYNCNT": 7, "SOCKET_TCP_LINGER2": 8, "SOCKET_TCP_DEFER_ACCEPT": 9, "SOCKET_TCP_WINDOW_CLAMP": 10, "SOCKET_TCP_INFO": 11, "SOCKET_TCP_QUICKACK": 12, } func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { p := new(SocketOption_SocketOptionName) *p = x return p } func (x SocketOption_SocketOptionName) String() string { return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) } func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") if err != nil { return err } *x = SocketOption_SocketOptionName(value) return nil } type ShutDownRequest_How int32 const ( ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 ) var ShutDownRequest_How_name = map[int32]string{ 1: "SOCKET_SHUT_RD", 2: "SOCKET_SHUT_WR", 3: "SOCKET_SHUT_RDWR", } var ShutDownRequest_How_value = map[string]int32{ "SOCKET_SHUT_RD": 1, "SOCKET_SHUT_WR": 2, "SOCKET_SHUT_RDWR": 3, } func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { p := new(ShutDownRequest_How) *p = x return p } func (x ShutDownRequest_How) String() string { return proto.EnumName(ShutDownRequest_How_name, int32(x)) } func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") if err != nil { return err } *x = ShutDownRequest_How(value) return nil } type ReceiveRequest_Flags int32 const ( ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 ) var ReceiveRequest_Flags_name = map[int32]string{ 1: "MSG_OOB", 2: "MSG_PEEK", } var ReceiveRequest_Flags_value = map[string]int32{ "MSG_OOB": 1, "MSG_PEEK": 2, } func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { p := new(ReceiveRequest_Flags) *p = x return p } func (x ReceiveRequest_Flags) String() string { return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) } func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") if err != nil { return err } *x = ReceiveRequest_Flags(value) return nil } type PollEvent_PollEventFlag int32 const ( PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 
PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 ) var PollEvent_PollEventFlag_name = map[int32]string{ 0: "SOCKET_POLLNONE", 1: "SOCKET_POLLIN", 2: "SOCKET_POLLPRI", 4: "SOCKET_POLLOUT", 8: "SOCKET_POLLERR", 16: "SOCKET_POLLHUP", 32: "SOCKET_POLLNVAL", 64: "SOCKET_POLLRDNORM", 128: "SOCKET_POLLRDBAND", 256: "SOCKET_POLLWRNORM", 512: "SOCKET_POLLWRBAND", 1024: "SOCKET_POLLMSG", 4096: "SOCKET_POLLREMOVE", 8192: "SOCKET_POLLRDHUP", } var PollEvent_PollEventFlag_value = map[string]int32{ "SOCKET_POLLNONE": 0, "SOCKET_POLLIN": 1, "SOCKET_POLLPRI": 2, "SOCKET_POLLOUT": 4, "SOCKET_POLLERR": 8, "SOCKET_POLLHUP": 16, "SOCKET_POLLNVAL": 32, "SOCKET_POLLRDNORM": 64, "SOCKET_POLLRDBAND": 128, "SOCKET_POLLWRNORM": 256, "SOCKET_POLLWRBAND": 512, "SOCKET_POLLMSG": 1024, "SOCKET_POLLREMOVE": 4096, "SOCKET_POLLRDHUP": 8192, } func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { p := new(PollEvent_PollEventFlag) *p = x return p } func (x PollEvent_PollEventFlag) String() string { return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) } func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") if err != nil { return err } *x = PollEvent_PollEventFlag(value) return nil } type ResolveReply_ErrorCode int32 const ( ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 ) var ResolveReply_ErrorCode_name = map[int32]string{ 1: "SOCKET_EAI_ADDRFAMILY", 2: "SOCKET_EAI_AGAIN", 3: "SOCKET_EAI_BADFLAGS", 4: "SOCKET_EAI_FAIL", 5: "SOCKET_EAI_FAMILY", 6: "SOCKET_EAI_MEMORY", 7: "SOCKET_EAI_NODATA", 8: "SOCKET_EAI_NONAME", 9: "SOCKET_EAI_SERVICE", 10: "SOCKET_EAI_SOCKTYPE", 11: "SOCKET_EAI_SYSTEM", 12: "SOCKET_EAI_BADHINTS", 13: "SOCKET_EAI_PROTOCOL", 14: "SOCKET_EAI_OVERFLOW", 15: "SOCKET_EAI_MAX", } var ResolveReply_ErrorCode_value = map[string]int32{ "SOCKET_EAI_ADDRFAMILY": 1, "SOCKET_EAI_AGAIN": 2, "SOCKET_EAI_BADFLAGS": 3, "SOCKET_EAI_FAIL": 4, "SOCKET_EAI_FAMILY": 5, "SOCKET_EAI_MEMORY": 6, 
"SOCKET_EAI_NODATA": 7, "SOCKET_EAI_NONAME": 8, "SOCKET_EAI_SERVICE": 9, "SOCKET_EAI_SOCKTYPE": 10, "SOCKET_EAI_SYSTEM": 11, "SOCKET_EAI_BADHINTS": 12, "SOCKET_EAI_PROTOCOL": 13, "SOCKET_EAI_OVERFLOW": 14, "SOCKET_EAI_MAX": 15, } func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { p := new(ResolveReply_ErrorCode) *p = x return p } func (x ResolveReply_ErrorCode) String() string { return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) } func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") if err != nil { return err } *x = ResolveReply_ErrorCode(value) return nil } type RemoteSocketServiceError struct { SystemError *int32 `protobuf:"varint,1,opt,name=system_error,def=0" json:"system_error,omitempty"` ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } func (*RemoteSocketServiceError) ProtoMessage() {} const Default_RemoteSocketServiceError_SystemError int32 = 0 func (m *RemoteSocketServiceError) GetSystemError() int32 { if m != nil && m.SystemError != nil { return *m.SystemError } return Default_RemoteSocketServiceError_SystemError } func (m *RemoteSocketServiceError) GetErrorDetail() string { if m != nil && m.ErrorDetail != nil { return *m.ErrorDetail } return "" } type AddressPort struct { Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address" json:"packed_address,omitempty"` HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint" json:"hostname_hint,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AddressPort) Reset() { *m = AddressPort{} } func (m *AddressPort) String() string { return proto.CompactTextString(m) } func (*AddressPort) ProtoMessage() {} func (m *AddressPort) GetPort() int32 { if m != nil && m.Port != nil { return *m.Port } return 0 } func (m *AddressPort) GetPackedAddress() []byte { if m != nil { return m.PackedAddress } return nil } func (m *AddressPort) GetHostnameHint() string { if m != nil && m.HostnameHint != nil { return *m.HostnameHint } return "" } type CreateSocketRequest struct { Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options" json:"socket_options,omitempty"` ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,def=0" json:"listen_backlog,omitempty"` RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip" json:"remote_ip,omitempty"` AppId *string `protobuf:"bytes,9,opt,name=app_id" json:"app_id,omitempty"` ProjectId *int64 `protobuf:"varint,10,opt,name=project_id" json:"project_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } func (*CreateSocketRequest) ProtoMessage() {} const 
Default_CreateSocketRequest_ListenBacklog int32 = 0 func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { if m != nil && m.Family != nil { return *m.Family } return CreateSocketRequest_IPv4 } func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { if m != nil && m.Protocol != nil { return *m.Protocol } return CreateSocketRequest_TCP } func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { if m != nil { return m.SocketOptions } return nil } func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { if m != nil { return m.ProxyExternalIp } return nil } func (m *CreateSocketRequest) GetListenBacklog() int32 { if m != nil && m.ListenBacklog != nil { return *m.ListenBacklog } return Default_CreateSocketRequest_ListenBacklog } func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { if m != nil { return m.RemoteIp } return nil } func (m *CreateSocketRequest) GetAppId() string { if m != nil && m.AppId != nil { return *m.AppId } return "" } func (m *CreateSocketRequest) GetProjectId() int64 { if m != nil && m.ProjectId != nil { return *m.ProjectId } return 0 } type CreateSocketReply struct { SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor" json:"socket_descriptor,omitempty"` ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address" json:"server_address,omitempty"` ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } func (*CreateSocketReply) ProtoMessage() {} var extRange_CreateSocketReply = []proto.ExtensionRange{ {1000, 536870911}, } func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { return extRange_CreateSocketReply } func (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension { if m.XXX_extensions == nil { m.XXX_extensions = make(map[int32]proto.Extension) } return m.XXX_extensions } func (m *CreateSocketReply) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *CreateSocketReply) GetServerAddress() *AddressPort { if m != nil { return m.ServerAddress } return nil } func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { if m != nil { return m.ProxyExternalIp } return nil } type BindRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BindRequest) Reset() { *m = BindRequest{} } func (m *BindRequest) String() string { return proto.CompactTextString(m) } func (*BindRequest) ProtoMessage() {} func (m *BindRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *BindRequest) GetProxyExternalIp() *AddressPort { if m != nil { return m.ProxyExternalIp } return nil } type BindReply struct { ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BindReply) Reset() { *m = BindReply{} } func (m *BindReply) String() string { return proto.CompactTextString(m) } func (*BindReply) ProtoMessage() {} func (m 
*BindReply) GetProxyExternalIp() *AddressPort { if m != nil { return m.ProxyExternalIp } return nil } type GetSocketNameRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } func (*GetSocketNameRequest) ProtoMessage() {} func (m *GetSocketNameRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } type GetSocketNameReply struct { ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } func (*GetSocketNameReply) ProtoMessage() {} func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { if m != nil { return m.ProxyExternalIp } return nil } type GetPeerNameRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } func (*GetPeerNameRequest) ProtoMessage() {} func (m *GetPeerNameRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } type GetPeerNameReply struct { PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip" json:"peer_ip,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } func (*GetPeerNameReply) ProtoMessage() {} func (m *GetPeerNameReply) GetPeerIp() *AddressPort { if m != nil { return m.PeerIp } return nil } type SocketOption struct { Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SocketOption) Reset() { *m = SocketOption{} } func (m *SocketOption) String() string { return proto.CompactTextString(m) } func (*SocketOption) ProtoMessage() {} func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { if m != nil && m.Level != nil { return *m.Level } return SocketOption_SOCKET_SOL_IP } func (m *SocketOption) GetOption() SocketOption_SocketOptionName { if m != nil && m.Option != nil { return *m.Option } return SocketOption_SOCKET_SO_DEBUG } func (m *SocketOption) GetValue() []byte { if m != nil { return m.Value } return nil } type SetSocketOptionsRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } func (*SetSocketOptionsRequest) 
ProtoMessage() {} func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { if m != nil { return m.Options } return nil } type SetSocketOptionsReply struct { XXX_unrecognized []byte `json:"-"` } func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } func (*SetSocketOptionsReply) ProtoMessage() {} type GetSocketOptionsRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } func (*GetSocketOptionsRequest) ProtoMessage() {} func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { if m != nil { return m.Options } return nil } type GetSocketOptionsReply struct { Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } func (*GetSocketOptionsReply) ProtoMessage() {} func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { if m != nil { return m.Options } return nil } type ConnectRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip" json:"remote_ip,omitempty"` TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } func (*ConnectRequest) ProtoMessage() {} const Default_ConnectRequest_TimeoutSeconds float64 = -1 func (m *ConnectRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *ConnectRequest) GetRemoteIp() *AddressPort { if m != nil { return m.RemoteIp } return nil } func (m *ConnectRequest) GetTimeoutSeconds() float64 { if m != nil && m.TimeoutSeconds != nil { return *m.TimeoutSeconds } return Default_ConnectRequest_TimeoutSeconds } type ConnectReply struct { ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } func (m *ConnectReply) Reset() { *m = ConnectReply{} } func (m *ConnectReply) String() string { return proto.CompactTextString(m) } func (*ConnectReply) ProtoMessage() {} var extRange_ConnectReply = []proto.ExtensionRange{ {1000, 536870911}, } func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ConnectReply } func (m *ConnectReply) ExtensionMap() map[int32]proto.Extension { if m.XXX_extensions == nil { m.XXX_extensions = make(map[int32]proto.Extension) } return m.XXX_extensions } func (m 
*ConnectReply) GetProxyExternalIp() *AddressPort { if m != nil { return m.ProxyExternalIp } return nil } type ListenRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListenRequest) Reset() { *m = ListenRequest{} } func (m *ListenRequest) String() string { return proto.CompactTextString(m) } func (*ListenRequest) ProtoMessage() {} func (m *ListenRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *ListenRequest) GetBacklog() int32 { if m != nil && m.Backlog != nil { return *m.Backlog } return 0 } type ListenReply struct { XXX_unrecognized []byte `json:"-"` } func (m *ListenReply) Reset() { *m = ListenReply{} } func (m *ListenReply) String() string { return proto.CompactTextString(m) } func (*ListenReply) ProtoMessage() {} type AcceptRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } func (*AcceptRequest) ProtoMessage() {} const Default_AcceptRequest_TimeoutSeconds float64 = -1 func (m *AcceptRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *AcceptRequest) GetTimeoutSeconds() float64 { if m != nil && m.TimeoutSeconds != nil { return *m.TimeoutSeconds } return Default_AcceptRequest_TimeoutSeconds } type AcceptReply struct { NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor" json:"new_socket_descriptor,omitempty"` RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address" json:"remote_address,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AcceptReply) Reset() { *m = AcceptReply{} } func (m *AcceptReply) String() string { return proto.CompactTextString(m) } func (*AcceptReply) ProtoMessage() {} func (m *AcceptReply) GetNewSocketDescriptor() []byte { if m != nil { return m.NewSocketDescriptor } return nil } func (m *AcceptReply) GetRemoteAddress() *AddressPort { if m != nil { return m.RemoteAddress } return nil } type ShutDownRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` SendOffset *int64 `protobuf:"varint,3,req,name=send_offset" json:"send_offset,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } func (*ShutDownRequest) ProtoMessage() {} func (m *ShutDownRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *ShutDownRequest) GetHow() ShutDownRequest_How { if m != nil && m.How != nil { return *m.How } return ShutDownRequest_SOCKET_SHUT_RD } func (m *ShutDownRequest) GetSendOffset() int64 { if m != nil && m.SendOffset != nil { return *m.SendOffset } return 0 } type ShutDownReply struct { XXX_unrecognized []byte `json:"-"` } func (m 
*ShutDownReply) Reset() { *m = ShutDownReply{} } func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } func (*ShutDownReply) ProtoMessage() {} type CloseRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,def=-1" json:"send_offset,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CloseRequest) Reset() { *m = CloseRequest{} } func (m *CloseRequest) String() string { return proto.CompactTextString(m) } func (*CloseRequest) ProtoMessage() {} const Default_CloseRequest_SendOffset int64 = -1 func (m *CloseRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *CloseRequest) GetSendOffset() int64 { if m != nil && m.SendOffset != nil { return *m.SendOffset } return Default_CloseRequest_SendOffset } type CloseReply struct { XXX_unrecognized []byte `json:"-"` } func (m *CloseReply) Reset() { *m = CloseReply{} } func (m *CloseReply) String() string { return proto.CompactTextString(m) } func (*CloseReply) ProtoMessage() {} type SendRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset" json:"stream_offset,omitempty"` Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to" json:"send_to,omitempty"` TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SendRequest) Reset() { *m = SendRequest{} } func (m *SendRequest) String() string { return proto.CompactTextString(m) } func (*SendRequest) ProtoMessage() {} const Default_SendRequest_Flags int32 = 0 const Default_SendRequest_TimeoutSeconds float64 = -1 func (m *SendRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *SendRequest) GetData() []byte { if m != nil { return m.Data } return nil } func (m *SendRequest) GetStreamOffset() int64 { if m != nil && m.StreamOffset != nil { return *m.StreamOffset } return 0 } func (m *SendRequest) GetFlags() int32 { if m != nil && m.Flags != nil { return *m.Flags } return Default_SendRequest_Flags } func (m *SendRequest) GetSendTo() *AddressPort { if m != nil { return m.SendTo } return nil } func (m *SendRequest) GetTimeoutSeconds() float64 { if m != nil && m.TimeoutSeconds != nil { return *m.TimeoutSeconds } return Default_SendRequest_TimeoutSeconds } type SendReply struct { DataSent *int32 `protobuf:"varint,1,opt,name=data_sent" json:"data_sent,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SendReply) Reset() { *m = SendReply{} } func (m *SendReply) String() string { return proto.CompactTextString(m) } func (*SendReply) ProtoMessage() {} func (m *SendReply) GetDataSent() int32 { if m != nil && m.DataSent != nil { return *m.DataSent } return 0 } type ReceiveRequest struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` DataSize *int32 `protobuf:"varint,2,req,name=data_size" json:"data_size,omitempty"` Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` TimeoutSeconds *float64 
`protobuf:"fixed64,5,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } func (*ReceiveRequest) ProtoMessage() {} const Default_ReceiveRequest_Flags int32 = 0 const Default_ReceiveRequest_TimeoutSeconds float64 = -1 func (m *ReceiveRequest) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *ReceiveRequest) GetDataSize() int32 { if m != nil && m.DataSize != nil { return *m.DataSize } return 0 } func (m *ReceiveRequest) GetFlags() int32 { if m != nil && m.Flags != nil { return *m.Flags } return Default_ReceiveRequest_Flags } func (m *ReceiveRequest) GetTimeoutSeconds() float64 { if m != nil && m.TimeoutSeconds != nil { return *m.TimeoutSeconds } return Default_ReceiveRequest_TimeoutSeconds } type ReceiveReply struct { StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset" json:"stream_offset,omitempty"` Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from" json:"received_from,omitempty"` BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size" json:"buffer_size,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } func (*ReceiveReply) ProtoMessage() {} func (m *ReceiveReply) GetStreamOffset() int64 { if m != nil && m.StreamOffset != nil { return *m.StreamOffset } return 0 } func (m *ReceiveReply) GetData() []byte { if m != nil { return m.Data } return nil } func (m *ReceiveReply) GetReceivedFrom() *AddressPort { if m != nil { return m.ReceivedFrom } return nil } func (m *ReceiveReply) GetBufferSize() int32 { if m != nil && m.BufferSize != nil { return *m.BufferSize } return 0 } type PollEvent struct { SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events" json:"requested_events,omitempty"` ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events" json:"observed_events,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PollEvent) Reset() { *m = PollEvent{} } func (m *PollEvent) String() string { return proto.CompactTextString(m) } func (*PollEvent) ProtoMessage() {} func (m *PollEvent) GetSocketDescriptor() string { if m != nil && m.SocketDescriptor != nil { return *m.SocketDescriptor } return "" } func (m *PollEvent) GetRequestedEvents() int32 { if m != nil && m.RequestedEvents != nil { return *m.RequestedEvents } return 0 } func (m *PollEvent) GetObservedEvents() int32 { if m != nil && m.ObservedEvents != nil { return *m.ObservedEvents } return 0 } type PollRequest struct { Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PollRequest) Reset() { *m = PollRequest{} } func (m *PollRequest) String() string { return proto.CompactTextString(m) } func (*PollRequest) ProtoMessage() {} const Default_PollRequest_TimeoutSeconds float64 = -1 func (m *PollRequest) GetEvents() []*PollEvent { if m != nil { return m.Events } return nil } func (m *PollRequest) GetTimeoutSeconds() float64 { if m != nil && 
m.TimeoutSeconds != nil { return *m.TimeoutSeconds } return Default_PollRequest_TimeoutSeconds } type PollReply struct { Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PollReply) Reset() { *m = PollReply{} } func (m *PollReply) String() string { return proto.CompactTextString(m) } func (*PollReply) ProtoMessage() {} func (m *PollReply) GetEvents() []*PollEvent { if m != nil { return m.Events } return nil } type ResolveRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } func (*ResolveRequest) ProtoMessage() {} func (m *ResolveRequest) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { if m != nil { return m.AddressFamilies } return nil } type ResolveReply struct { PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address" json:"packed_address,omitempty"` CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name" json:"canonical_name,omitempty"` Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ResolveReply) Reset() { *m = ResolveReply{} } func (m *ResolveReply) String() string { return proto.CompactTextString(m) } func (*ResolveReply) ProtoMessage() {} func (m *ResolveReply) GetPackedAddress() [][]byte { if m != nil { return m.PackedAddress } return nil } func (m *ResolveReply) GetCanonicalName() string { if m != nil && m.CanonicalName != nil { return *m.CanonicalName } return "" } func (m *ResolveReply) GetAliases() []string { if m != nil { return m.Aliases } return nil } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/socket/socket_service.proto ================================================ syntax = "proto2"; option go_package = "socket"; package appengine; message RemoteSocketServiceError { enum ErrorCode { SYSTEM_ERROR = 1; GAI_ERROR = 2; FAILURE = 4; PERMISSION_DENIED = 5; INVALID_REQUEST = 6; SOCKET_CLOSED = 7; } enum SystemError { option allow_alias = true; SYS_SUCCESS = 0; SYS_EPERM = 1; SYS_ENOENT = 2; SYS_ESRCH = 3; SYS_EINTR = 4; SYS_EIO = 5; SYS_ENXIO = 6; SYS_E2BIG = 7; SYS_ENOEXEC = 8; SYS_EBADF = 9; SYS_ECHILD = 10; SYS_EAGAIN = 11; SYS_EWOULDBLOCK = 11; SYS_ENOMEM = 12; SYS_EACCES = 13; SYS_EFAULT = 14; SYS_ENOTBLK = 15; SYS_EBUSY = 16; SYS_EEXIST = 17; SYS_EXDEV = 18; SYS_ENODEV = 19; SYS_ENOTDIR = 20; SYS_EISDIR = 21; SYS_EINVAL = 22; SYS_ENFILE = 23; SYS_EMFILE = 24; SYS_ENOTTY = 25; SYS_ETXTBSY = 26; SYS_EFBIG = 27; SYS_ENOSPC = 28; SYS_ESPIPE = 29; SYS_EROFS = 30; SYS_EMLINK = 31; SYS_EPIPE = 32; SYS_EDOM = 33; SYS_ERANGE = 34; SYS_EDEADLK = 35; SYS_EDEADLOCK = 35; SYS_ENAMETOOLONG = 36; SYS_ENOLCK = 37; SYS_ENOSYS = 38; SYS_ENOTEMPTY = 39; SYS_ELOOP = 40; SYS_ENOMSG = 42; SYS_EIDRM = 43; SYS_ECHRNG = 44; SYS_EL2NSYNC = 45; SYS_EL3HLT = 46; SYS_EL3RST = 47; SYS_ELNRNG = 48; SYS_EUNATCH = 49; SYS_ENOCSI = 50; SYS_EL2HLT = 51; SYS_EBADE = 52; SYS_EBADR = 53; SYS_EXFULL = 54; SYS_ENOANO = 55; SYS_EBADRQC = 56; SYS_EBADSLT = 57; SYS_EBFONT = 59; 
SYS_ENOSTR = 60; SYS_ENODATA = 61; SYS_ETIME = 62; SYS_ENOSR = 63; SYS_ENONET = 64; SYS_ENOPKG = 65; SYS_EREMOTE = 66; SYS_ENOLINK = 67; SYS_EADV = 68; SYS_ESRMNT = 69; SYS_ECOMM = 70; SYS_EPROTO = 71; SYS_EMULTIHOP = 72; SYS_EDOTDOT = 73; SYS_EBADMSG = 74; SYS_EOVERFLOW = 75; SYS_ENOTUNIQ = 76; SYS_EBADFD = 77; SYS_EREMCHG = 78; SYS_ELIBACC = 79; SYS_ELIBBAD = 80; SYS_ELIBSCN = 81; SYS_ELIBMAX = 82; SYS_ELIBEXEC = 83; SYS_EILSEQ = 84; SYS_ERESTART = 85; SYS_ESTRPIPE = 86; SYS_EUSERS = 87; SYS_ENOTSOCK = 88; SYS_EDESTADDRREQ = 89; SYS_EMSGSIZE = 90; SYS_EPROTOTYPE = 91; SYS_ENOPROTOOPT = 92; SYS_EPROTONOSUPPORT = 93; SYS_ESOCKTNOSUPPORT = 94; SYS_EOPNOTSUPP = 95; SYS_ENOTSUP = 95; SYS_EPFNOSUPPORT = 96; SYS_EAFNOSUPPORT = 97; SYS_EADDRINUSE = 98; SYS_EADDRNOTAVAIL = 99; SYS_ENETDOWN = 100; SYS_ENETUNREACH = 101; SYS_ENETRESET = 102; SYS_ECONNABORTED = 103; SYS_ECONNRESET = 104; SYS_ENOBUFS = 105; SYS_EISCONN = 106; SYS_ENOTCONN = 107; SYS_ESHUTDOWN = 108; SYS_ETOOMANYREFS = 109; SYS_ETIMEDOUT = 110; SYS_ECONNREFUSED = 111; SYS_EHOSTDOWN = 112; SYS_EHOSTUNREACH = 113; SYS_EALREADY = 114; SYS_EINPROGRESS = 115; SYS_ESTALE = 116; SYS_EUCLEAN = 117; SYS_ENOTNAM = 118; SYS_ENAVAIL = 119; SYS_EISNAM = 120; SYS_EREMOTEIO = 121; SYS_EDQUOT = 122; SYS_ENOMEDIUM = 123; SYS_EMEDIUMTYPE = 124; SYS_ECANCELED = 125; SYS_ENOKEY = 126; SYS_EKEYEXPIRED = 127; SYS_EKEYREVOKED = 128; SYS_EKEYREJECTED = 129; SYS_EOWNERDEAD = 130; SYS_ENOTRECOVERABLE = 131; SYS_ERFKILL = 132; } optional int32 system_error = 1 [default=0]; optional string error_detail = 2; } message AddressPort { required int32 port = 1; optional bytes packed_address = 2; optional string hostname_hint = 3; } message CreateSocketRequest { enum SocketFamily { IPv4 = 1; IPv6 = 2; } enum SocketProtocol { TCP = 1; UDP = 2; } required SocketFamily family = 1; required SocketProtocol protocol = 2; repeated SocketOption socket_options = 3; optional AddressPort proxy_external_ip = 4; optional int32 listen_backlog = 5 [default=0]; optional AddressPort remote_ip = 6; optional string app_id = 9; optional int64 project_id = 10; } message CreateSocketReply { optional string socket_descriptor = 1; optional AddressPort server_address = 3; optional AddressPort proxy_external_ip = 4; extensions 1000 to max; } message BindRequest { required string socket_descriptor = 1; required AddressPort proxy_external_ip = 2; } message BindReply { optional AddressPort proxy_external_ip = 1; } message GetSocketNameRequest { required string socket_descriptor = 1; } message GetSocketNameReply { optional AddressPort proxy_external_ip = 2; } message GetPeerNameRequest { required string socket_descriptor = 1; } message GetPeerNameReply { optional AddressPort peer_ip = 2; } message SocketOption { enum SocketOptionLevel { SOCKET_SOL_IP = 0; SOCKET_SOL_SOCKET = 1; SOCKET_SOL_TCP = 6; SOCKET_SOL_UDP = 17; } enum SocketOptionName { option allow_alias = true; SOCKET_SO_DEBUG = 1; SOCKET_SO_REUSEADDR = 2; SOCKET_SO_TYPE = 3; SOCKET_SO_ERROR = 4; SOCKET_SO_DONTROUTE = 5; SOCKET_SO_BROADCAST = 6; SOCKET_SO_SNDBUF = 7; SOCKET_SO_RCVBUF = 8; SOCKET_SO_KEEPALIVE = 9; SOCKET_SO_OOBINLINE = 10; SOCKET_SO_LINGER = 13; SOCKET_SO_RCVTIMEO = 20; SOCKET_SO_SNDTIMEO = 21; SOCKET_IP_TOS = 1; SOCKET_IP_TTL = 2; SOCKET_IP_HDRINCL = 3; SOCKET_IP_OPTIONS = 4; SOCKET_TCP_NODELAY = 1; SOCKET_TCP_MAXSEG = 2; SOCKET_TCP_CORK = 3; SOCKET_TCP_KEEPIDLE = 4; SOCKET_TCP_KEEPINTVL = 5; SOCKET_TCP_KEEPCNT = 6; SOCKET_TCP_SYNCNT = 7; SOCKET_TCP_LINGER2 = 8; SOCKET_TCP_DEFER_ACCEPT = 9; SOCKET_TCP_WINDOW_CLAMP = 10; 
SOCKET_TCP_INFO = 11; SOCKET_TCP_QUICKACK = 12; } required SocketOptionLevel level = 1; required SocketOptionName option = 2; required bytes value = 3; } message SetSocketOptionsRequest { required string socket_descriptor = 1; repeated SocketOption options = 2; } message SetSocketOptionsReply { } message GetSocketOptionsRequest { required string socket_descriptor = 1; repeated SocketOption options = 2; } message GetSocketOptionsReply { repeated SocketOption options = 2; } message ConnectRequest { required string socket_descriptor = 1; required AddressPort remote_ip = 2; optional double timeout_seconds = 3 [default=-1]; } message ConnectReply { optional AddressPort proxy_external_ip = 1; extensions 1000 to max; } message ListenRequest { required string socket_descriptor = 1; required int32 backlog = 2; } message ListenReply { } message AcceptRequest { required string socket_descriptor = 1; optional double timeout_seconds = 2 [default=-1]; } message AcceptReply { optional bytes new_socket_descriptor = 2; optional AddressPort remote_address = 3; } message ShutDownRequest { enum How { SOCKET_SHUT_RD = 1; SOCKET_SHUT_WR = 2; SOCKET_SHUT_RDWR = 3; } required string socket_descriptor = 1; required How how = 2; required int64 send_offset = 3; } message ShutDownReply { } message CloseRequest { required string socket_descriptor = 1; optional int64 send_offset = 2 [default=-1]; } message CloseReply { } message SendRequest { required string socket_descriptor = 1; required bytes data = 2 [ctype=CORD]; required int64 stream_offset = 3; optional int32 flags = 4 [default=0]; optional AddressPort send_to = 5; optional double timeout_seconds = 6 [default=-1]; } message SendReply { optional int32 data_sent = 1; } message ReceiveRequest { enum Flags { MSG_OOB = 1; MSG_PEEK = 2; } required string socket_descriptor = 1; required int32 data_size = 2; optional int32 flags = 3 [default=0]; optional double timeout_seconds = 5 [default=-1]; } message ReceiveReply { optional int64 stream_offset = 2; optional bytes data = 3 [ctype=CORD]; optional AddressPort received_from = 4; optional int32 buffer_size = 5; } message PollEvent { enum PollEventFlag { SOCKET_POLLNONE = 0; SOCKET_POLLIN = 1; SOCKET_POLLPRI = 2; SOCKET_POLLOUT = 4; SOCKET_POLLERR = 8; SOCKET_POLLHUP = 16; SOCKET_POLLNVAL = 32; SOCKET_POLLRDNORM = 64; SOCKET_POLLRDBAND = 128; SOCKET_POLLWRNORM = 256; SOCKET_POLLWRBAND = 512; SOCKET_POLLMSG = 1024; SOCKET_POLLREMOVE = 4096; SOCKET_POLLRDHUP = 8192; }; required string socket_descriptor = 1; required int32 requested_events = 2; required int32 observed_events = 3; } message PollRequest { repeated PollEvent events = 1; optional double timeout_seconds = 2 [default=-1]; } message PollReply { repeated PollEvent events = 2; } message ResolveRequest { required string name = 1; repeated CreateSocketRequest.SocketFamily address_families = 2; } message ResolveReply { enum ErrorCode { SOCKET_EAI_ADDRFAMILY = 1; SOCKET_EAI_AGAIN = 2; SOCKET_EAI_BADFLAGS = 3; SOCKET_EAI_FAIL = 4; SOCKET_EAI_FAMILY = 5; SOCKET_EAI_MEMORY = 6; SOCKET_EAI_NODATA = 7; SOCKET_EAI_NONAME = 8; SOCKET_EAI_SERVICE = 9; SOCKET_EAI_SOCKTYPE = 10; SOCKET_EAI_SYSTEM = 11; SOCKET_EAI_BADHINTS = 12; SOCKET_EAI_PROTOCOL = 13; SOCKET_EAI_OVERFLOW = 14; SOCKET_EAI_MAX = 15; }; repeated bytes packed_address = 2; optional string canonical_name = 3; repeated string aliases = 4; } ================================================ FILE: vendor/google.golang.org/appengine/internal/system/system_service.pb.go ================================================ // 
Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/system/system_service.proto // DO NOT EDIT! /* Package system is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/system/system_service.proto It has these top-level messages: SystemServiceError SystemStat GetSystemStatsRequest GetSystemStatsResponse StartBackgroundRequestRequest StartBackgroundRequestResponse */ package system import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type SystemServiceError_ErrorCode int32 const ( SystemServiceError_OK SystemServiceError_ErrorCode = 0 SystemServiceError_INTERNAL_ERROR SystemServiceError_ErrorCode = 1 SystemServiceError_BACKEND_REQUIRED SystemServiceError_ErrorCode = 2 SystemServiceError_LIMIT_REACHED SystemServiceError_ErrorCode = 3 ) var SystemServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INTERNAL_ERROR", 2: "BACKEND_REQUIRED", 3: "LIMIT_REACHED", } var SystemServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INTERNAL_ERROR": 1, "BACKEND_REQUIRED": 2, "LIMIT_REACHED": 3, } func (x SystemServiceError_ErrorCode) Enum() *SystemServiceError_ErrorCode { p := new(SystemServiceError_ErrorCode) *p = x return p } func (x SystemServiceError_ErrorCode) String() string { return proto.EnumName(SystemServiceError_ErrorCode_name, int32(x)) } func (x *SystemServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(SystemServiceError_ErrorCode_value, data, "SystemServiceError_ErrorCode") if err != nil { return err } *x = SystemServiceError_ErrorCode(value) return nil } type SystemServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *SystemServiceError) Reset() { *m = SystemServiceError{} } func (m *SystemServiceError) String() string { return proto.CompactTextString(m) } func (*SystemServiceError) ProtoMessage() {} type SystemStat struct { // Instaneous value of this stat. Current *float64 `protobuf:"fixed64,1,opt,name=current" json:"current,omitempty"` // Average over time, if this stat has an instaneous value. Average1M *float64 `protobuf:"fixed64,3,opt,name=average1m" json:"average1m,omitempty"` Average10M *float64 `protobuf:"fixed64,4,opt,name=average10m" json:"average10m,omitempty"` // Total value, if the stat accumulates over time. Total *float64 `protobuf:"fixed64,2,opt,name=total" json:"total,omitempty"` // Rate over time, if this stat accumulates. 
Rate1M *float64 `protobuf:"fixed64,5,opt,name=rate1m" json:"rate1m,omitempty"` Rate10M *float64 `protobuf:"fixed64,6,opt,name=rate10m" json:"rate10m,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SystemStat) Reset() { *m = SystemStat{} } func (m *SystemStat) String() string { return proto.CompactTextString(m) } func (*SystemStat) ProtoMessage() {} func (m *SystemStat) GetCurrent() float64 { if m != nil && m.Current != nil { return *m.Current } return 0 } func (m *SystemStat) GetAverage1M() float64 { if m != nil && m.Average1M != nil { return *m.Average1M } return 0 } func (m *SystemStat) GetAverage10M() float64 { if m != nil && m.Average10M != nil { return *m.Average10M } return 0 } func (m *SystemStat) GetTotal() float64 { if m != nil && m.Total != nil { return *m.Total } return 0 } func (m *SystemStat) GetRate1M() float64 { if m != nil && m.Rate1M != nil { return *m.Rate1M } return 0 } func (m *SystemStat) GetRate10M() float64 { if m != nil && m.Rate10M != nil { return *m.Rate10M } return 0 } type GetSystemStatsRequest struct { XXX_unrecognized []byte `json:"-"` } func (m *GetSystemStatsRequest) Reset() { *m = GetSystemStatsRequest{} } func (m *GetSystemStatsRequest) String() string { return proto.CompactTextString(m) } func (*GetSystemStatsRequest) ProtoMessage() {} type GetSystemStatsResponse struct { // CPU used by this instance, in mcycles. Cpu *SystemStat `protobuf:"bytes,1,opt,name=cpu" json:"cpu,omitempty"` // Physical memory (RAM) used by this instance, in megabytes. Memory *SystemStat `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetSystemStatsResponse) Reset() { *m = GetSystemStatsResponse{} } func (m *GetSystemStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetSystemStatsResponse) ProtoMessage() {} func (m *GetSystemStatsResponse) GetCpu() *SystemStat { if m != nil { return m.Cpu } return nil } func (m *GetSystemStatsResponse) GetMemory() *SystemStat { if m != nil { return m.Memory } return nil } type StartBackgroundRequestRequest struct { XXX_unrecognized []byte `json:"-"` } func (m *StartBackgroundRequestRequest) Reset() { *m = StartBackgroundRequestRequest{} } func (m *StartBackgroundRequestRequest) String() string { return proto.CompactTextString(m) } func (*StartBackgroundRequestRequest) ProtoMessage() {} type StartBackgroundRequestResponse struct { // Every /_ah/background request will have an X-AppEngine-BackgroundRequest // header, whose value will be equal to this parameter, the request_id. RequestId *string `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StartBackgroundRequestResponse) Reset() { *m = StartBackgroundRequestResponse{} } func (m *StartBackgroundRequestResponse) String() string { return proto.CompactTextString(m) } func (*StartBackgroundRequestResponse) ProtoMessage() {} func (m *StartBackgroundRequestResponse) GetRequestId() string { if m != nil && m.RequestId != nil { return *m.RequestId } return "" } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/system/system_service.proto ================================================ syntax = "proto2"; option go_package = "system"; package appengine; message SystemServiceError { enum ErrorCode { OK = 0; INTERNAL_ERROR = 1; BACKEND_REQUIRED = 2; LIMIT_REACHED = 3; } } message SystemStat { // Instaneous value of this stat. 
optional double current = 1; // Average over time, if this stat has an instaneous value. optional double average1m = 3; optional double average10m = 4; // Total value, if the stat accumulates over time. optional double total = 2; // Rate over time, if this stat accumulates. optional double rate1m = 5; optional double rate10m = 6; } message GetSystemStatsRequest { } message GetSystemStatsResponse { // CPU used by this instance, in mcycles. optional SystemStat cpu = 1; // Physical memory (RAM) used by this instance, in megabytes. optional SystemStat memory = 2; } message StartBackgroundRequestRequest { } message StartBackgroundRequestResponse { // Every /_ah/background request will have an X-AppEngine-BackgroundRequest // header, whose value will be equal to this parameter, the request_id. optional string request_id = 1; } ================================================ FILE: vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto // DO NOT EDIT! /* Package taskqueue is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto It has these top-level messages: TaskQueueServiceError TaskPayload TaskQueueRetryParameters TaskQueueAcl TaskQueueHttpHeader TaskQueueMode TaskQueueAddRequest TaskQueueAddResponse TaskQueueBulkAddRequest TaskQueueBulkAddResponse TaskQueueDeleteRequest TaskQueueDeleteResponse TaskQueueForceRunRequest TaskQueueForceRunResponse TaskQueueUpdateQueueRequest TaskQueueUpdateQueueResponse TaskQueueFetchQueuesRequest TaskQueueFetchQueuesResponse TaskQueueFetchQueueStatsRequest TaskQueueScannerQueueInfo TaskQueueFetchQueueStatsResponse TaskQueuePauseQueueRequest TaskQueuePauseQueueResponse TaskQueuePurgeQueueRequest TaskQueuePurgeQueueResponse TaskQueueDeleteQueueRequest TaskQueueDeleteQueueResponse TaskQueueDeleteGroupRequest TaskQueueDeleteGroupResponse TaskQueueQueryTasksRequest TaskQueueQueryTasksResponse TaskQueueFetchTaskRequest TaskQueueFetchTaskResponse TaskQueueUpdateStorageLimitRequest TaskQueueUpdateStorageLimitResponse TaskQueueQueryAndOwnTasksRequest TaskQueueQueryAndOwnTasksResponse TaskQueueModifyTaskLeaseRequest TaskQueueModifyTaskLeaseResponse */ package taskqueue import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import appengine "google.golang.org/appengine/internal/datastore" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type TaskQueueServiceError_ErrorCode int32 const ( TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0 TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1 TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2 TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3 TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4 TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5 TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6 TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7 TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8 TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9 TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10 TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11 TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12 TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13 TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14 TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15 TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16 TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17 TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18 TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19 TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20 TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21 TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22 TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23 TaskQueueServiceError_INCORRECT_CREATOR_NAME TaskQueueServiceError_ErrorCode = 24 TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25 TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26 TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27 // Reserved range for the Datastore error codes. // Original Datastore error code is shifted by DATASTORE_ERROR offset. 
TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000 ) var TaskQueueServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "UNKNOWN_QUEUE", 2: "TRANSIENT_ERROR", 3: "INTERNAL_ERROR", 4: "TASK_TOO_LARGE", 5: "INVALID_TASK_NAME", 6: "INVALID_QUEUE_NAME", 7: "INVALID_URL", 8: "INVALID_QUEUE_RATE", 9: "PERMISSION_DENIED", 10: "TASK_ALREADY_EXISTS", 11: "TOMBSTONED_TASK", 12: "INVALID_ETA", 13: "INVALID_REQUEST", 14: "UNKNOWN_TASK", 15: "TOMBSTONED_QUEUE", 16: "DUPLICATE_TASK_NAME", 17: "SKIPPED", 18: "TOO_MANY_TASKS", 19: "INVALID_PAYLOAD", 20: "INVALID_RETRY_PARAMETERS", 21: "INVALID_QUEUE_MODE", 22: "ACL_LOOKUP_ERROR", 23: "TRANSACTIONAL_REQUEST_TOO_LARGE", 24: "INCORRECT_CREATOR_NAME", 25: "TASK_LEASE_EXPIRED", 26: "QUEUE_PAUSED", 27: "INVALID_TAG", 10000: "DATASTORE_ERROR", } var TaskQueueServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "UNKNOWN_QUEUE": 1, "TRANSIENT_ERROR": 2, "INTERNAL_ERROR": 3, "TASK_TOO_LARGE": 4, "INVALID_TASK_NAME": 5, "INVALID_QUEUE_NAME": 6, "INVALID_URL": 7, "INVALID_QUEUE_RATE": 8, "PERMISSION_DENIED": 9, "TASK_ALREADY_EXISTS": 10, "TOMBSTONED_TASK": 11, "INVALID_ETA": 12, "INVALID_REQUEST": 13, "UNKNOWN_TASK": 14, "TOMBSTONED_QUEUE": 15, "DUPLICATE_TASK_NAME": 16, "SKIPPED": 17, "TOO_MANY_TASKS": 18, "INVALID_PAYLOAD": 19, "INVALID_RETRY_PARAMETERS": 20, "INVALID_QUEUE_MODE": 21, "ACL_LOOKUP_ERROR": 22, "TRANSACTIONAL_REQUEST_TOO_LARGE": 23, "INCORRECT_CREATOR_NAME": 24, "TASK_LEASE_EXPIRED": 25, "QUEUE_PAUSED": 26, "INVALID_TAG": 27, "DATASTORE_ERROR": 10000, } func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode { p := new(TaskQueueServiceError_ErrorCode) *p = x return p } func (x TaskQueueServiceError_ErrorCode) String() string { return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x)) } func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode") if err != nil { return err } *x = TaskQueueServiceError_ErrorCode(value) return nil } type TaskQueueMode_Mode int32 const ( TaskQueueMode_PUSH TaskQueueMode_Mode = 0 TaskQueueMode_PULL TaskQueueMode_Mode = 1 ) var TaskQueueMode_Mode_name = map[int32]string{ 0: "PUSH", 1: "PULL", } var TaskQueueMode_Mode_value = map[string]int32{ "PUSH": 0, "PULL": 1, } func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode { p := new(TaskQueueMode_Mode) *p = x return p } func (x TaskQueueMode_Mode) String() string { return proto.EnumName(TaskQueueMode_Mode_name, int32(x)) } func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode") if err != nil { return err } *x = TaskQueueMode_Mode(value) return nil } type TaskQueueAddRequest_RequestMethod int32 const ( TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1 TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2 TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3 TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4 TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5 ) var TaskQueueAddRequest_RequestMethod_name = map[int32]string{ 1: "GET", 2: "POST", 3: "HEAD", 4: "PUT", 5: "DELETE", } var TaskQueueAddRequest_RequestMethod_value = map[string]int32{ "GET": 1, "POST": 2, "HEAD": 3, "PUT": 4, "DELETE": 5, } func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod { p := 
new(TaskQueueAddRequest_RequestMethod) *p = x return p } func (x TaskQueueAddRequest_RequestMethod) String() string { return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x)) } func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod") if err != nil { return err } *x = TaskQueueAddRequest_RequestMethod(value) return nil } type TaskQueueQueryTasksResponse_Task_RequestMethod int32 const ( TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1 TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2 TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3 TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4 TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5 ) var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{ 1: "GET", 2: "POST", 3: "HEAD", 4: "PUT", 5: "DELETE", } var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{ "GET": 1, "POST": 2, "HEAD": 3, "PUT": 4, "DELETE": 5, } func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod { p := new(TaskQueueQueryTasksResponse_Task_RequestMethod) *p = x return p } func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string { return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x)) } func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod") if err != nil { return err } *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value) return nil } type TaskQueueServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} } func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) } func (*TaskQueueServiceError) ProtoMessage() {} type TaskPayload struct { XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } func (m *TaskPayload) Reset() { *m = TaskPayload{} } func (m *TaskPayload) String() string { return proto.CompactTextString(m) } func (*TaskPayload) ProtoMessage() {} func (m *TaskPayload) Marshal() ([]byte, error) { return proto.MarshalMessageSet(m.ExtensionMap()) } func (m *TaskPayload) Unmarshal(buf []byte) error { return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) } func (m *TaskPayload) MarshalJSON() ([]byte, error) { return proto.MarshalMessageSetJSON(m.XXX_extensions) } func (m *TaskPayload) UnmarshalJSON(buf []byte) error { return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) } // ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler var _ proto.Marshaler = (*TaskPayload)(nil) var _ proto.Unmarshaler = (*TaskPayload)(nil) var extRange_TaskPayload = []proto.ExtensionRange{ {10, 2147483646}, } func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange { return extRange_TaskPayload } func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension { if m.XXX_extensions == nil { m.XXX_extensions = make(map[int32]proto.Extension) } return m.XXX_extensions } type TaskQueueRetryParameters struct { RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" 
json:"retry_limit,omitempty"` AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"` MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"` MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"` MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} } func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) } func (*TaskQueueRetryParameters) ProtoMessage() {} const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1 const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600 const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16 func (m *TaskQueueRetryParameters) GetRetryLimit() int32 { if m != nil && m.RetryLimit != nil { return *m.RetryLimit } return 0 } func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 { if m != nil && m.AgeLimitSec != nil { return *m.AgeLimitSec } return 0 } func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 { if m != nil && m.MinBackoffSec != nil { return *m.MinBackoffSec } return Default_TaskQueueRetryParameters_MinBackoffSec } func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 { if m != nil && m.MaxBackoffSec != nil { return *m.MaxBackoffSec } return Default_TaskQueueRetryParameters_MaxBackoffSec } func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 { if m != nil && m.MaxDoublings != nil { return *m.MaxDoublings } return Default_TaskQueueRetryParameters_MaxDoublings } type TaskQueueAcl struct { UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"` WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} } func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) } func (*TaskQueueAcl) ProtoMessage() {} func (m *TaskQueueAcl) GetUserEmail() [][]byte { if m != nil { return m.UserEmail } return nil } func (m *TaskQueueAcl) GetWriterEmail() [][]byte { if m != nil { return m.WriterEmail } return nil } type TaskQueueHttpHeader struct { Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} } func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) } func (*TaskQueueHttpHeader) ProtoMessage() {} func (m *TaskQueueHttpHeader) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *TaskQueueHttpHeader) GetValue() []byte { if m != nil { return m.Value } return nil } type TaskQueueMode struct { XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} } func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) } func (*TaskQueueMode) ProtoMessage() {} type TaskQueueAddRequest struct { QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` Method *TaskQueueAddRequest_RequestMethod 
`protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"` Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"` Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep,name=Header" json:"header,omitempty"` Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"` Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"` AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"` Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"` Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"` Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"` RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"` Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} } func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueAddRequest) ProtoMessage() {} const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH func (m *TaskQueueAddRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueAddRequest) GetTaskName() []byte { if m != nil { return m.TaskName } return nil } func (m *TaskQueueAddRequest) GetEtaUsec() int64 { if m != nil && m.EtaUsec != nil { return *m.EtaUsec } return 0 } func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod { if m != nil && m.Method != nil { return *m.Method } return Default_TaskQueueAddRequest_Method } func (m *TaskQueueAddRequest) GetUrl() []byte { if m != nil { return m.Url } return nil } func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header { if m != nil { return m.Header } return nil } func (m *TaskQueueAddRequest) GetBody() []byte { if m != nil { return m.Body } return nil } func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction { if m != nil { return m.Transaction } return nil } func (m *TaskQueueAddRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable { if m != nil { return m.Crontimetable } return nil } func (m *TaskQueueAddRequest) GetDescription() []byte { if m != nil { return m.Description } return nil } func (m *TaskQueueAddRequest) GetPayload() *TaskPayload { if m != nil { return m.Payload } return nil } func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters { if m != nil { return m.RetryParameters } return nil } func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode { if m != nil && m.Mode != nil { return *m.Mode } return Default_TaskQueueAddRequest_Mode } func (m *TaskQueueAddRequest) GetTag() []byte { if m != nil { return m.Tag } return nil } type TaskQueueAddRequest_Header struct { Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"` Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueAddRequest_Header) 
Reset() { *m = TaskQueueAddRequest_Header{} } func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) } func (*TaskQueueAddRequest_Header) ProtoMessage() {} func (m *TaskQueueAddRequest_Header) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *TaskQueueAddRequest_Header) GetValue() []byte { if m != nil { return m.Value } return nil } type TaskQueueAddRequest_CronTimetable struct { Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"` Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} } func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) } func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {} func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte { if m != nil { return m.Schedule } return nil } func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte { if m != nil { return m.Timezone } return nil } type TaskQueueAddResponse struct { ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} } func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueAddResponse) ProtoMessage() {} func (m *TaskQueueAddResponse) GetChosenTaskName() []byte { if m != nil { return m.ChosenTaskName } return nil } type TaskQueueBulkAddRequest struct { AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} } func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueBulkAddRequest) ProtoMessage() {} func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest { if m != nil { return m.AddRequest } return nil } type TaskQueueBulkAddResponse struct { Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" json:"taskresult,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} } func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueBulkAddResponse) ProtoMessage() {} func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult { if m != nil { return m.Taskresult } return nil } type TaskQueueBulkAddResponse_TaskResult struct { Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} } func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) } func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {} func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode { if m != nil && m.Result != nil { return *m.Result } return TaskQueueServiceError_OK } func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte { if m != nil { 
return m.ChosenTaskName } return nil } type TaskQueueDeleteRequest struct { QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"` AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} } func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueDeleteRequest) ProtoMessage() {} func (m *TaskQueueDeleteRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte { if m != nil { return m.TaskName } return nil } func (m *TaskQueueDeleteRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type TaskQueueDeleteResponse struct { Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} } func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueDeleteResponse) ProtoMessage() {} func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode { if m != nil { return m.Result } return nil } type TaskQueueForceRunRequest struct { AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} } func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueForceRunRequest) ProtoMessage() {} func (m *TaskQueueForceRunRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueForceRunRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueForceRunRequest) GetTaskName() []byte { if m != nil { return m.TaskName } return nil } type TaskQueueForceRunResponse struct { Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} } func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueForceRunResponse) ProtoMessage() {} func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode { if m != nil && m.Result != nil { return *m.Result } return TaskQueueServiceError_OK } type TaskQueueUpdateQueueRequest struct { AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"` BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"` UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"` RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" 
json:"retry_parameters,omitempty"` MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"` Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"` HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} } func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueUpdateQueueRequest) ProtoMessage() {} const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 { if m != nil && m.BucketRefillPerSecond != nil { return *m.BucketRefillPerSecond } return 0 } func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 { if m != nil && m.BucketCapacity != nil { return *m.BucketCapacity } return 0 } func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string { if m != nil && m.UserSpecifiedRate != nil { return *m.UserSpecifiedRate } return "" } func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters { if m != nil { return m.RetryParameters } return nil } func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 { if m != nil && m.MaxConcurrentRequests != nil { return *m.MaxConcurrentRequests } return 0 } func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode { if m != nil && m.Mode != nil { return *m.Mode } return Default_TaskQueueUpdateQueueRequest_Mode } func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl { if m != nil { return m.Acl } return nil } func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader { if m != nil { return m.HeaderOverride } return nil } type TaskQueueUpdateQueueResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} } func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueUpdateQueueResponse) ProtoMessage() {} type TaskQueueFetchQueuesRequest struct { AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} } func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueFetchQueuesRequest) ProtoMessage() {} func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 { if m != nil && m.MaxRows != nil { return *m.MaxRows } return 0 } type TaskQueueFetchQueuesResponse struct { Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep,name=Queue" json:"queue,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} } func (m *TaskQueueFetchQueuesResponse) String() 
string { return proto.CompactTextString(m) } func (*TaskQueueFetchQueuesResponse) ProtoMessage() {} func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue { if m != nil { return m.Queue } return nil } type TaskQueueFetchQueuesResponse_Queue struct { QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"` BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"` UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"` Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"` RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"` MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"` Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"` HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"` CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} } func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) } func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {} const Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting" func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 { if m != nil && m.BucketRefillPerSecond != nil { return *m.BucketRefillPerSecond } return 0 } func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 { if m != nil && m.BucketCapacity != nil { return *m.BucketCapacity } return 0 } func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string { if m != nil && m.UserSpecifiedRate != nil { return *m.UserSpecifiedRate } return "" } func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool { if m != nil && m.Paused != nil { return *m.Paused } return Default_TaskQueueFetchQueuesResponse_Queue_Paused } func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters { if m != nil { return m.RetryParameters } return nil } func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 { if m != nil && m.MaxConcurrentRequests != nil { return *m.MaxConcurrentRequests } return 0 } func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode { if m != nil && m.Mode != nil { return *m.Mode } return Default_TaskQueueFetchQueuesResponse_Queue_Mode } func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl { if m != nil { return m.Acl } return nil } func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader { if m != nil { return m.HeaderOverride } return nil } 
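
// Illustrative sketch, not part of the generated file: it shows how the
// proto2-style getters defined above behave. When an optional field pointer
// is nil, the getter falls back to the declared default (false, PUSH,
// "apphosting"); helpers such as proto.Bool build pointers for setting such
// fields. The function name exampleQueueDefaults is hypothetical, and the
// sketch assumes this file's existing proto import.
func exampleQueueDefaults() {
	q := &TaskQueueFetchQueuesResponse_Queue{
		QueueName: []byte("default"),
	}
	_ = q.GetPaused()      // false (Default_TaskQueueFetchQueuesResponse_Queue_Paused)
	_ = q.GetMode()        // TaskQueueMode_PUSH
	_ = q.GetCreatorName() // "apphosting"

	// Optional scalar fields are pointers; the declared default only applies
	// while the pointer is nil.
	q.Paused = proto.Bool(true)
	_ = q.GetPaused() // now true
}
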
func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string { if m != nil && m.CreatorName != nil { return *m.CreatorName } return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName } type TaskQueueFetchQueueStatsRequest struct { AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"` MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} } func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {} const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0 func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 { if m != nil && m.MaxNumTasks != nil { return *m.MaxNumTasks } return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks } type TaskQueueScannerQueueInfo struct { ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"` ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"` SamplingDurationSeconds *float64 `protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"` RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"` EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} } func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) } func (*TaskQueueScannerQueueInfo) ProtoMessage() {} func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 { if m != nil && m.ExecutedLastMinute != nil { return *m.ExecutedLastMinute } return 0 } func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 { if m != nil && m.ExecutedLastHour != nil { return *m.ExecutedLastHour } return 0 } func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 { if m != nil && m.SamplingDurationSeconds != nil { return *m.SamplingDurationSeconds } return 0 } func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 { if m != nil && m.RequestsInFlight != nil { return *m.RequestsInFlight } return 0 } func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 { if m != nil && m.EnforcedRate != nil { return *m.EnforcedRate } return 0 } type TaskQueueFetchQueueStatsResponse struct { Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} } func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {} func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats { if m != nil { return m.Queuestats } return nil } type 
TaskQueueFetchQueueStatsResponse_QueueStats struct { NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"` OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"` ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() { *m = TaskQueueFetchQueueStatsResponse_QueueStats{} } func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string { return proto.CompactTextString(m) } func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {} func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 { if m != nil && m.NumTasks != nil { return *m.NumTasks } return 0 } func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 { if m != nil && m.OldestEtaUsec != nil { return *m.OldestEtaUsec } return 0 } func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo { if m != nil { return m.ScannerInfo } return nil } type TaskQueuePauseQueueRequest struct { AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} } func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueuePauseQueueRequest) ProtoMessage() {} func (m *TaskQueuePauseQueueRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueuePauseQueueRequest) GetPause() bool { if m != nil && m.Pause != nil { return *m.Pause } return false } type TaskQueuePauseQueueResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} } func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueuePauseQueueResponse) ProtoMessage() {} type TaskQueuePurgeQueueRequest struct { AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} } func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueuePurgeQueueRequest) ProtoMessage() {} func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } type TaskQueuePurgeQueueResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} } func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueuePurgeQueueResponse) ProtoMessage() {} type TaskQueueDeleteQueueRequest struct { AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m 
*TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} } func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueDeleteQueueRequest) ProtoMessage() {} func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } type TaskQueueDeleteQueueResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} } func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueDeleteQueueResponse) ProtoMessage() {} type TaskQueueDeleteGroupRequest struct { AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} } func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueDeleteGroupRequest) ProtoMessage() {} func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } type TaskQueueDeleteGroupResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} } func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueDeleteGroupResponse) ProtoMessage() {} type TaskQueueQueryTasksRequest struct { AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"` StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"` StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"` MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} } func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueQueryTasksRequest) ProtoMessage() {} const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1 func (m *TaskQueueQueryTasksRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte { if m != nil { return m.StartTaskName } return nil } func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 { if m != nil && m.StartEtaUsec != nil { return *m.StartEtaUsec } return 0 } func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte { if m != nil { return m.StartTag } return nil } func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 { if m != nil && m.MaxRows != nil { return *m.MaxRows } return Default_TaskQueueQueryTasksRequest_MaxRows } type TaskQueueQueryTasksResponse struct { Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} } func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) } func 
(*TaskQueueQueryTasksResponse) ProtoMessage() {} func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task { if m != nil { return m.Task } return nil } type TaskQueueQueryTasksResponse_Task struct { TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"` Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"` RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"` Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep,name=Header" json:"header,omitempty"` BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"` Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"` CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" json:"creation_time_usec,omitempty"` Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"` Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"` Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"` Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"` RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"` FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"` Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"` ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} } func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) } func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {} const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0 const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0 func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte { if m != nil { return m.TaskName } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 { if m != nil && m.EtaUsec != nil { return *m.EtaUsec } return 0 } func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte { if m != nil { return m.Url } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod { if m != nil && m.Method != nil { return *m.Method } return TaskQueueQueryTasksResponse_Task_GET } func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 { if m != nil && m.RetryCount != nil { return *m.RetryCount } return Default_TaskQueueQueryTasksResponse_Task_RetryCount } func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header { if m != nil { return m.Header } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 { if m != nil && m.BodySize != nil { return *m.BodySize } return 0 } func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte { if m != nil { return m.Body } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 { if m 
!= nil && m.CreationTimeUsec != nil { return *m.CreationTimeUsec } return 0 } func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable { if m != nil { return m.Crontimetable } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog { if m != nil { return m.Runlog } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte { if m != nil { return m.Description } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload { if m != nil { return m.Payload } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters { if m != nil { return m.RetryParameters } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 { if m != nil && m.FirstTryUsec != nil { return *m.FirstTryUsec } return 0 } func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte { if m != nil { return m.Tag } return nil } func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 { if m != nil && m.ExecutionCount != nil { return *m.ExecutionCount } return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount } type TaskQueueQueryTasksResponse_Task_Header struct { Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"` Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() { *m = TaskQueueQueryTasksResponse_Task_Header{} } func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) } func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {} func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte { if m != nil { return m.Value } return nil } type TaskQueueQueryTasksResponse_Task_CronTimetable struct { Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"` Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() { *m = TaskQueueQueryTasksResponse_Task_CronTimetable{} } func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string { return proto.CompactTextString(m) } func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {} func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte { if m != nil { return m.Schedule } return nil } func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte { if m != nil { return m.Timezone } return nil } type TaskQueueQueryTasksResponse_Task_RunLog struct { DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"` LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"` ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"` ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"` RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() { *m = TaskQueueQueryTasksResponse_Task_RunLog{} } func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) } func 
(*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {} func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 { if m != nil && m.DispatchedUsec != nil { return *m.DispatchedUsec } return 0 } func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 { if m != nil && m.LagUsec != nil { return *m.LagUsec } return 0 } func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 { if m != nil && m.ElapsedUsec != nil { return *m.ElapsedUsec } return 0 } func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 { if m != nil && m.ResponseCode != nil { return *m.ResponseCode } return 0 } func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string { if m != nil && m.RetryReason != nil { return *m.RetryReason } return "" } type TaskQueueFetchTaskRequest struct { AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} } func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueFetchTaskRequest) ProtoMessage() {} func (m *TaskQueueFetchTaskRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte { if m != nil { return m.TaskName } return nil } type TaskQueueFetchTaskResponse struct { Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} } func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueFetchTaskResponse) ProtoMessage() {} func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse { if m != nil { return m.Task } return nil } type TaskQueueUpdateStorageLimitRequest struct { AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} } func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {} func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte { if m != nil { return m.AppId } return nil } func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 { if m != nil && m.Limit != nil { return *m.Limit } return 0 } type TaskQueueUpdateStorageLimitResponse struct { NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} } func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {} func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 { if m != nil && m.NewLimit != nil { return *m.NewLimit } return 0 } type TaskQueueQueryAndOwnTasksRequest struct { QueueName []byte 
`protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"` MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"` GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"` Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = TaskQueueQueryAndOwnTasksRequest{} } func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {} const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 { if m != nil && m.LeaseSeconds != nil { return *m.LeaseSeconds } return 0 } func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 { if m != nil && m.MaxTasks != nil { return *m.MaxTasks } return 0 } func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool { if m != nil && m.GroupByTag != nil { return *m.GroupByTag } return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag } func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte { if m != nil { return m.Tag } return nil } type TaskQueueQueryAndOwnTasksResponse struct { Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} } func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {} func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task { if m != nil { return m.Task } return nil } type TaskQueueQueryAndOwnTasksResponse_Task struct { TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"` Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"` Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() { *m = TaskQueueQueryAndOwnTasksResponse_Task{} } func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) } func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {} const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0 func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte { if m != nil { return m.TaskName } return nil } func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 { if m != nil && m.EtaUsec != nil { return *m.EtaUsec } return 0 } func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 { if m != nil && m.RetryCount != nil { return *m.RetryCount } return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount } func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte { if m != nil { return m.Body } return nil } func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte { if m != nil { return m.Tag } return nil } type 
TaskQueueModifyTaskLeaseRequest struct { QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` LeaseSeconds *float64 `protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} } func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) } func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {} func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte { if m != nil { return m.QueueName } return nil } func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte { if m != nil { return m.TaskName } return nil } func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 { if m != nil && m.EtaUsec != nil { return *m.EtaUsec } return 0 } func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 { if m != nil && m.LeaseSeconds != nil { return *m.LeaseSeconds } return 0 } type TaskQueueModifyTaskLeaseResponse struct { UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} } func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) } func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {} func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 { if m != nil && m.UpdatedEtaUsec != nil { return *m.UpdatedEtaUsec } return 0 } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto ================================================ syntax = "proto2"; option go_package = "taskqueue"; import "google.golang.org/appengine/internal/datastore/datastore_v3.proto"; package appengine; message TaskQueueServiceError { enum ErrorCode { OK = 0; UNKNOWN_QUEUE = 1; TRANSIENT_ERROR = 2; INTERNAL_ERROR = 3; TASK_TOO_LARGE = 4; INVALID_TASK_NAME = 5; INVALID_QUEUE_NAME = 6; INVALID_URL = 7; INVALID_QUEUE_RATE = 8; PERMISSION_DENIED = 9; TASK_ALREADY_EXISTS = 10; TOMBSTONED_TASK = 11; INVALID_ETA = 12; INVALID_REQUEST = 13; UNKNOWN_TASK = 14; TOMBSTONED_QUEUE = 15; DUPLICATE_TASK_NAME = 16; SKIPPED = 17; TOO_MANY_TASKS = 18; INVALID_PAYLOAD = 19; INVALID_RETRY_PARAMETERS = 20; INVALID_QUEUE_MODE = 21; ACL_LOOKUP_ERROR = 22; TRANSACTIONAL_REQUEST_TOO_LARGE = 23; INCORRECT_CREATOR_NAME = 24; TASK_LEASE_EXPIRED = 25; QUEUE_PAUSED = 26; INVALID_TAG = 27; // Reserved range for the Datastore error codes. // Original Datastore error code is shifted by DATASTORE_ERROR offset. 
DATASTORE_ERROR = 10000; } } message TaskPayload { extensions 10 to max; option message_set_wire_format = true; } message TaskQueueRetryParameters { optional int32 retry_limit = 1; optional int64 age_limit_sec = 2; optional double min_backoff_sec = 3 [default = 0.1]; optional double max_backoff_sec = 4 [default = 3600]; optional int32 max_doublings = 5 [default = 16]; } message TaskQueueAcl { repeated bytes user_email = 1; repeated bytes writer_email = 2; } message TaskQueueHttpHeader { required bytes key = 1; required bytes value = 2; } message TaskQueueMode { enum Mode { PUSH = 0; PULL = 1; } } message TaskQueueAddRequest { required bytes queue_name = 1; required bytes task_name = 2; required int64 eta_usec = 3; enum RequestMethod { GET = 1; POST = 2; HEAD = 3; PUT = 4; DELETE = 5; } optional RequestMethod method = 5 [default=POST]; optional bytes url = 4; repeated group Header = 6 { required bytes key = 7; required bytes value = 8; } optional bytes body = 9 [ctype=CORD]; optional Transaction transaction = 10; optional bytes app_id = 11; optional group CronTimetable = 12 { required bytes schedule = 13; required bytes timezone = 14; } optional bytes description = 15; optional TaskPayload payload = 16; optional TaskQueueRetryParameters retry_parameters = 17; optional TaskQueueMode.Mode mode = 18 [default=PUSH]; optional bytes tag = 19; } message TaskQueueAddResponse { optional bytes chosen_task_name = 1; } message TaskQueueBulkAddRequest { repeated TaskQueueAddRequest add_request = 1; } message TaskQueueBulkAddResponse { repeated group TaskResult = 1 { required TaskQueueServiceError.ErrorCode result = 2; optional bytes chosen_task_name = 3; } } message TaskQueueDeleteRequest { required bytes queue_name = 1; repeated bytes task_name = 2; optional bytes app_id = 3; } message TaskQueueDeleteResponse { repeated TaskQueueServiceError.ErrorCode result = 3; } message TaskQueueForceRunRequest { optional bytes app_id = 1; required bytes queue_name = 2; required bytes task_name = 3; } message TaskQueueForceRunResponse { required TaskQueueServiceError.ErrorCode result = 3; } message TaskQueueUpdateQueueRequest { optional bytes app_id = 1; required bytes queue_name = 2; required double bucket_refill_per_second = 3; required int32 bucket_capacity = 4; optional string user_specified_rate = 5; optional TaskQueueRetryParameters retry_parameters = 6; optional int32 max_concurrent_requests = 7; optional TaskQueueMode.Mode mode = 8 [default = PUSH]; optional TaskQueueAcl acl = 9; repeated TaskQueueHttpHeader header_override = 10; } message TaskQueueUpdateQueueResponse { } message TaskQueueFetchQueuesRequest { optional bytes app_id = 1; required int32 max_rows = 2; } message TaskQueueFetchQueuesResponse { repeated group Queue = 1 { required bytes queue_name = 2; required double bucket_refill_per_second = 3; required double bucket_capacity = 4; optional string user_specified_rate = 5; required bool paused = 6 [default=false]; optional TaskQueueRetryParameters retry_parameters = 7; optional int32 max_concurrent_requests = 8; optional TaskQueueMode.Mode mode = 9 [default = PUSH]; optional TaskQueueAcl acl = 10; repeated TaskQueueHttpHeader header_override = 11; optional string creator_name = 12 [ctype=CORD, default="apphosting"]; } } message TaskQueueFetchQueueStatsRequest { optional bytes app_id = 1; repeated bytes queue_name = 2; optional int32 max_num_tasks = 3 [default = 0]; } message TaskQueueScannerQueueInfo { required int64 executed_last_minute = 1; required int64 executed_last_hour = 2; required double 
sampling_duration_seconds = 3; optional int32 requests_in_flight = 4; optional double enforced_rate = 5; } message TaskQueueFetchQueueStatsResponse { repeated group QueueStats = 1 { required int32 num_tasks = 2; required int64 oldest_eta_usec = 3; optional TaskQueueScannerQueueInfo scanner_info = 4; } } message TaskQueuePauseQueueRequest { required bytes app_id = 1; required bytes queue_name = 2; required bool pause = 3; } message TaskQueuePauseQueueResponse { } message TaskQueuePurgeQueueRequest { optional bytes app_id = 1; required bytes queue_name = 2; } message TaskQueuePurgeQueueResponse { } message TaskQueueDeleteQueueRequest { required bytes app_id = 1; required bytes queue_name = 2; } message TaskQueueDeleteQueueResponse { } message TaskQueueDeleteGroupRequest { required bytes app_id = 1; } message TaskQueueDeleteGroupResponse { } message TaskQueueQueryTasksRequest { optional bytes app_id = 1; required bytes queue_name = 2; optional bytes start_task_name = 3; optional int64 start_eta_usec = 4; optional bytes start_tag = 6; optional int32 max_rows = 5 [default = 1]; } message TaskQueueQueryTasksResponse { repeated group Task = 1 { required bytes task_name = 2; required int64 eta_usec = 3; optional bytes url = 4; enum RequestMethod { GET = 1; POST = 2; HEAD = 3; PUT = 4; DELETE = 5; } optional RequestMethod method = 5; optional int32 retry_count = 6 [default=0]; repeated group Header = 7 { required bytes key = 8; required bytes value = 9; } optional int32 body_size = 10; optional bytes body = 11 [ctype=CORD]; required int64 creation_time_usec = 12; optional group CronTimetable = 13 { required bytes schedule = 14; required bytes timezone = 15; } optional group RunLog = 16 { required int64 dispatched_usec = 17; required int64 lag_usec = 18; required int64 elapsed_usec = 19; optional int64 response_code = 20; optional string retry_reason = 27; } optional bytes description = 21; optional TaskPayload payload = 22; optional TaskQueueRetryParameters retry_parameters = 23; optional int64 first_try_usec = 24; optional bytes tag = 25; optional int32 execution_count = 26 [default=0]; } } message TaskQueueFetchTaskRequest { optional bytes app_id = 1; required bytes queue_name = 2; required bytes task_name = 3; } message TaskQueueFetchTaskResponse { required TaskQueueQueryTasksResponse task = 1; } message TaskQueueUpdateStorageLimitRequest { required bytes app_id = 1; required int64 limit = 2; } message TaskQueueUpdateStorageLimitResponse { required int64 new_limit = 1; } message TaskQueueQueryAndOwnTasksRequest { required bytes queue_name = 1; required double lease_seconds = 2; required int64 max_tasks = 3; optional bool group_by_tag = 4 [default=false]; optional bytes tag = 5; } message TaskQueueQueryAndOwnTasksResponse { repeated group Task = 1 { required bytes task_name = 2; required int64 eta_usec = 3; optional int32 retry_count = 4 [default=0]; optional bytes body = 5 [ctype=CORD]; optional bytes tag = 6; } } message TaskQueueModifyTaskLeaseRequest { required bytes queue_name = 1; required bytes task_name = 2; required int64 eta_usec = 3; required double lease_seconds = 4; } message TaskQueueModifyTaskLeaseResponse { required int64 updated_eta_usec = 1; } ================================================ FILE: vendor/google.golang.org/appengine/internal/transaction.go ================================================ // Copyright 2014 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
package internal

// This file implements hooks for applying datastore transactions.

import (
	"errors"
	"reflect"

	"github.com/golang/protobuf/proto"
	netcontext "golang.org/x/net/context"

	basepb "google.golang.org/appengine/internal/base"
	pb "google.golang.org/appengine/internal/datastore"
)

var transactionSetters = make(map[reflect.Type]reflect.Value)

// RegisterTransactionSetter registers a function that sets transaction information
// in a protocol buffer message. f should be a function with two arguments,
// the first being a protocol buffer type, and the second being *datastore.Transaction.
func RegisterTransactionSetter(f interface{}) {
	v := reflect.ValueOf(f)
	transactionSetters[v.Type().In(0)] = v
}

// applyTransaction applies the transaction t to message pb
// by using the relevant setter passed to RegisterTransactionSetter.
func applyTransaction(pb proto.Message, t *pb.Transaction) {
	v := reflect.ValueOf(pb)
	if f, ok := transactionSetters[v.Type()]; ok {
		f.Call([]reflect.Value{v, reflect.ValueOf(t)})
	}
}

var transactionKey = "used for *Transaction"

func transactionFromContext(ctx netcontext.Context) *transaction {
	t, _ := ctx.Value(&transactionKey).(*transaction)
	return t
}

func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
	return netcontext.WithValue(ctx, &transactionKey, t)
}

type transaction struct {
	transaction pb.Transaction
	finished    bool
}

var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")

func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
	if transactionFromContext(c) != nil {
		return errors.New("nested transactions are not supported")
	}

	// Begin the transaction.
	t := &transaction{}
	req := &pb.BeginTransactionRequest{
		App: proto.String(FullyQualifiedAppID(c)),
	}
	if xg {
		req.AllowMultipleEg = proto.Bool(true)
	}
	if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
		return err
	}

	// Call f, rolling back the transaction if f returns a non-nil error, or panics.
	// The panic is not recovered.
	defer func() {
		if t.finished {
			return
		}
		t.finished = true
		// Ignore the error return value, since we are already returning a non-nil
		// error (or we're panicking).
		Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
	}()
	if err := f(withTransaction(c, t)); err != nil {
		return err
	}
	t.finished = true

	// Commit the transaction.
	res := &pb.CommitResponse{}
	err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
	if ae, ok := err.(*APIError); ok {
		/* TODO: restore this conditional
		if appengine.IsDevAppServer() {
		*/
		// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
		// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
		if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
			return ErrConcurrentTransaction
		}
		if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
			return ErrConcurrentTransaction
		}
	}
	return err
}


================================================
FILE: vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
================================================
// Code generated by protoc-gen-go.
// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
// DO NOT EDIT!

/*
Package urlfetch is a generated protocol buffer package.
It is generated from these files: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto It has these top-level messages: URLFetchServiceError URLFetchRequest URLFetchResponse */ package urlfetch import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type URLFetchServiceError_ErrorCode int32 const ( URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 ) var URLFetchServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "INVALID_URL", 2: "FETCH_ERROR", 3: "UNSPECIFIED_ERROR", 4: "RESPONSE_TOO_LARGE", 5: "DEADLINE_EXCEEDED", 6: "SSL_CERTIFICATE_ERROR", 7: "DNS_ERROR", 8: "CLOSED", 9: "INTERNAL_TRANSIENT_ERROR", 10: "TOO_MANY_REDIRECTS", 11: "MALFORMED_REPLY", 12: "CONNECTION_ERROR", } var URLFetchServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "INVALID_URL": 1, "FETCH_ERROR": 2, "UNSPECIFIED_ERROR": 3, "RESPONSE_TOO_LARGE": 4, "DEADLINE_EXCEEDED": 5, "SSL_CERTIFICATE_ERROR": 6, "DNS_ERROR": 7, "CLOSED": 8, "INTERNAL_TRANSIENT_ERROR": 9, "TOO_MANY_REDIRECTS": 10, "MALFORMED_REPLY": 11, "CONNECTION_ERROR": 12, } func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { p := new(URLFetchServiceError_ErrorCode) *p = x return p } func (x URLFetchServiceError_ErrorCode) String() string { return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) } func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") if err != nil { return err } *x = URLFetchServiceError_ErrorCode(value) return nil } type URLFetchRequest_RequestMethod int32 const ( URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 ) var URLFetchRequest_RequestMethod_name = map[int32]string{ 1: "GET", 2: "POST", 3: "HEAD", 4: "PUT", 5: "DELETE", 6: "PATCH", } var URLFetchRequest_RequestMethod_value = map[string]int32{ "GET": 1, "POST": 2, "HEAD": 3, "PUT": 4, "DELETE": 5, "PATCH": 6, } func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { p := new(URLFetchRequest_RequestMethod) *p = x return p } func (x URLFetchRequest_RequestMethod) String() string { return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) } func (x 
*URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") if err != nil { return err } *x = URLFetchRequest_RequestMethod(value) return nil } type URLFetchServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } func (*URLFetchServiceError) ProtoMessage() {} type URLFetchRequest struct { Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } func (*URLFetchRequest) ProtoMessage() {} const Default_URLFetchRequest_FollowRedirects bool = true const Default_URLFetchRequest_MustValidateServerCertificate bool = true func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { if m != nil && m.Method != nil { return *m.Method } return URLFetchRequest_GET } func (m *URLFetchRequest) GetUrl() string { if m != nil && m.Url != nil { return *m.Url } return "" } func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { if m != nil { return m.Header } return nil } func (m *URLFetchRequest) GetPayload() []byte { if m != nil { return m.Payload } return nil } func (m *URLFetchRequest) GetFollowRedirects() bool { if m != nil && m.FollowRedirects != nil { return *m.FollowRedirects } return Default_URLFetchRequest_FollowRedirects } func (m *URLFetchRequest) GetDeadline() float64 { if m != nil && m.Deadline != nil { return *m.Deadline } return 0 } func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { if m != nil && m.MustValidateServerCertificate != nil { return *m.MustValidateServerCertificate } return Default_URLFetchRequest_MustValidateServerCertificate } type URLFetchRequest_Header struct { Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } func (*URLFetchRequest_Header) ProtoMessage() {} func (m *URLFetchRequest_Header) GetKey() string { if m != nil && m.Key != nil { return *m.Key } return "" } func (m *URLFetchRequest_Header) GetValue() string { if m != nil && m.Value != nil { return *m.Value } return "" } type URLFetchResponse struct { Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" 
json:"header,omitempty"` ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } func (*URLFetchResponse) ProtoMessage() {} const Default_URLFetchResponse_ContentWasTruncated bool = false const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 const Default_URLFetchResponse_ApiBytesSent int64 = 0 const Default_URLFetchResponse_ApiBytesReceived int64 = 0 func (m *URLFetchResponse) GetContent() []byte { if m != nil { return m.Content } return nil } func (m *URLFetchResponse) GetStatusCode() int32 { if m != nil && m.StatusCode != nil { return *m.StatusCode } return 0 } func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { if m != nil { return m.Header } return nil } func (m *URLFetchResponse) GetContentWasTruncated() bool { if m != nil && m.ContentWasTruncated != nil { return *m.ContentWasTruncated } return Default_URLFetchResponse_ContentWasTruncated } func (m *URLFetchResponse) GetExternalBytesSent() int64 { if m != nil && m.ExternalBytesSent != nil { return *m.ExternalBytesSent } return 0 } func (m *URLFetchResponse) GetExternalBytesReceived() int64 { if m != nil && m.ExternalBytesReceived != nil { return *m.ExternalBytesReceived } return 0 } func (m *URLFetchResponse) GetFinalUrl() string { if m != nil && m.FinalUrl != nil { return *m.FinalUrl } return "" } func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { if m != nil && m.ApiCpuMilliseconds != nil { return *m.ApiCpuMilliseconds } return Default_URLFetchResponse_ApiCpuMilliseconds } func (m *URLFetchResponse) GetApiBytesSent() int64 { if m != nil && m.ApiBytesSent != nil { return *m.ApiBytesSent } return Default_URLFetchResponse_ApiBytesSent } func (m *URLFetchResponse) GetApiBytesReceived() int64 { if m != nil && m.ApiBytesReceived != nil { return *m.ApiBytesReceived } return Default_URLFetchResponse_ApiBytesReceived } type URLFetchResponse_Header struct { Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } func (*URLFetchResponse_Header) ProtoMessage() {} func (m *URLFetchResponse_Header) GetKey() string { if m != nil && m.Key != nil { return *m.Key } return "" } func (m *URLFetchResponse_Header) GetValue() string { if m != nil && m.Value != nil { return *m.Value } return "" } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto 
================================================ syntax = "proto2"; option go_package = "urlfetch"; package appengine; message URLFetchServiceError { enum ErrorCode { OK = 0; INVALID_URL = 1; FETCH_ERROR = 2; UNSPECIFIED_ERROR = 3; RESPONSE_TOO_LARGE = 4; DEADLINE_EXCEEDED = 5; SSL_CERTIFICATE_ERROR = 6; DNS_ERROR = 7; CLOSED = 8; INTERNAL_TRANSIENT_ERROR = 9; TOO_MANY_REDIRECTS = 10; MALFORMED_REPLY = 11; CONNECTION_ERROR = 12; } } message URLFetchRequest { enum RequestMethod { GET = 1; POST = 2; HEAD = 3; PUT = 4; DELETE = 5; PATCH = 6; } required RequestMethod Method = 1; required string Url = 2; repeated group Header = 3 { required string Key = 4; required string Value = 5; } optional bytes Payload = 6 [ctype=CORD]; optional bool FollowRedirects = 7 [default=true]; optional double Deadline = 8; optional bool MustValidateServerCertificate = 9 [default=true]; } message URLFetchResponse { optional bytes Content = 1; required int32 StatusCode = 2; repeated group Header = 3 { required string Key = 4; required string Value = 5; } optional bool ContentWasTruncated = 6 [default=false]; optional int64 ExternalBytesSent = 7; optional int64 ExternalBytesReceived = 8; optional string FinalUrl = 9; optional int64 ApiCpuMilliseconds = 10 [default=0]; optional int64 ApiBytesSent = 11 [default=0]; optional int64 ApiBytesReceived = 12 [default=0]; } ================================================ FILE: vendor/google.golang.org/appengine/internal/user/user_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/user/user_service.proto // DO NOT EDIT! /* Package user is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/user/user_service.proto It has these top-level messages: UserServiceError CreateLoginURLRequest CreateLoginURLResponse CreateLogoutURLRequest CreateLogoutURLResponse GetOAuthUserRequest GetOAuthUserResponse CheckOAuthSignatureRequest CheckOAuthSignatureResponse */ package user import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
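A hedged sketch of how the URLFetchRequest/URLFetchResponse messages defined above are used. The normal consumer is the google.golang.org/appengine/urlfetch package (not vendored here); the service and method names below are assumptions based on that package, and the snippet is illustrative rather than part of the sources.

package urlfetchsketch

import (
	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/urlfetch"
)

// fetch issues a GET through the urlfetch service and returns the body and status code.
func fetch(c context.Context, url string) ([]byte, int32, error) {
	req := &pb.URLFetchRequest{
		Method:          pb.URLFetchRequest_GET.Enum(),
		Url:             proto.String(url),
		FollowRedirects: proto.Bool(true),  // the proto default, spelled out
		Deadline:        proto.Float64(10), // seconds
	}
	res := &pb.URLFetchResponse{}
	if err := internal.Call(c, "urlfetch", "Fetch", req, res); err != nil {
		return nil, 0, err
	}
	return res.Content, res.GetStatusCode(), nil
}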
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type UserServiceError_ErrorCode int32 const ( UserServiceError_OK UserServiceError_ErrorCode = 0 UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1 UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2 UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3 UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4 UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5 ) var UserServiceError_ErrorCode_name = map[int32]string{ 0: "OK", 1: "REDIRECT_URL_TOO_LONG", 2: "NOT_ALLOWED", 3: "OAUTH_INVALID_TOKEN", 4: "OAUTH_INVALID_REQUEST", 5: "OAUTH_ERROR", } var UserServiceError_ErrorCode_value = map[string]int32{ "OK": 0, "REDIRECT_URL_TOO_LONG": 1, "NOT_ALLOWED": 2, "OAUTH_INVALID_TOKEN": 3, "OAUTH_INVALID_REQUEST": 4, "OAUTH_ERROR": 5, } func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode { p := new(UserServiceError_ErrorCode) *p = x return p } func (x UserServiceError_ErrorCode) String() string { return proto.EnumName(UserServiceError_ErrorCode_name, int32(x)) } func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode") if err != nil { return err } *x = UserServiceError_ErrorCode(value) return nil } type UserServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *UserServiceError) Reset() { *m = UserServiceError{} } func (m *UserServiceError) String() string { return proto.CompactTextString(m) } func (*UserServiceError) ProtoMessage() {} type CreateLoginURLRequest struct { DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} } func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) } func (*CreateLoginURLRequest) ProtoMessage() {} func (m *CreateLoginURLRequest) GetDestinationUrl() string { if m != nil && m.DestinationUrl != nil { return *m.DestinationUrl } return "" } func (m *CreateLoginURLRequest) GetAuthDomain() string { if m != nil && m.AuthDomain != nil { return *m.AuthDomain } return "" } func (m *CreateLoginURLRequest) GetFederatedIdentity() string { if m != nil && m.FederatedIdentity != nil { return *m.FederatedIdentity } return "" } type CreateLoginURLResponse struct { LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} } func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) } func (*CreateLoginURLResponse) ProtoMessage() {} func (m *CreateLoginURLResponse) GetLoginUrl() string { if m != nil && m.LoginUrl != nil { return *m.LoginUrl } return "" } type CreateLogoutURLRequest struct { DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} } func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) } func 
(*CreateLogoutURLRequest) ProtoMessage() {} func (m *CreateLogoutURLRequest) GetDestinationUrl() string { if m != nil && m.DestinationUrl != nil { return *m.DestinationUrl } return "" } func (m *CreateLogoutURLRequest) GetAuthDomain() string { if m != nil && m.AuthDomain != nil { return *m.AuthDomain } return "" } type CreateLogoutURLResponse struct { LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} } func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) } func (*CreateLogoutURLResponse) ProtoMessage() {} func (m *CreateLogoutURLResponse) GetLogoutUrl() string { if m != nil && m.LogoutUrl != nil { return *m.LogoutUrl } return "" } type GetOAuthUserRequest struct { Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"` Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} } func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) } func (*GetOAuthUserRequest) ProtoMessage() {} func (m *GetOAuthUserRequest) GetScope() string { if m != nil && m.Scope != nil { return *m.Scope } return "" } func (m *GetOAuthUserRequest) GetScopes() []string { if m != nil { return m.Scopes } return nil } type GetOAuthUserResponse struct { Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"` AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"` UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"` IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"` ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"` Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} } func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) } func (*GetOAuthUserResponse) ProtoMessage() {} const Default_GetOAuthUserResponse_IsAdmin bool = false func (m *GetOAuthUserResponse) GetEmail() string { if m != nil && m.Email != nil { return *m.Email } return "" } func (m *GetOAuthUserResponse) GetUserId() string { if m != nil && m.UserId != nil { return *m.UserId } return "" } func (m *GetOAuthUserResponse) GetAuthDomain() string { if m != nil && m.AuthDomain != nil { return *m.AuthDomain } return "" } func (m *GetOAuthUserResponse) GetUserOrganization() string { if m != nil && m.UserOrganization != nil { return *m.UserOrganization } return "" } func (m *GetOAuthUserResponse) GetIsAdmin() bool { if m != nil && m.IsAdmin != nil { return *m.IsAdmin } return Default_GetOAuthUserResponse_IsAdmin } func (m *GetOAuthUserResponse) GetClientId() string { if m != nil && m.ClientId != nil { return *m.ClientId } return "" } func (m *GetOAuthUserResponse) GetScopes() []string { if m != nil { return m.Scopes } return nil } type CheckOAuthSignatureRequest struct { XXX_unrecognized []byte `json:"-"` } func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} } func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) } func (*CheckOAuthSignatureRequest) 
ProtoMessage() {} type CheckOAuthSignatureResponse struct { OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} } func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) } func (*CheckOAuthSignatureResponse) ProtoMessage() {} func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string { if m != nil && m.OauthConsumerKey != nil { return *m.OauthConsumerKey } return "" } func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/user/user_service.proto ================================================ syntax = "proto2"; option go_package = "user"; package appengine; message UserServiceError { enum ErrorCode { OK = 0; REDIRECT_URL_TOO_LONG = 1; NOT_ALLOWED = 2; OAUTH_INVALID_TOKEN = 3; OAUTH_INVALID_REQUEST = 4; OAUTH_ERROR = 5; } } message CreateLoginURLRequest { required string destination_url = 1; optional string auth_domain = 2; optional string federated_identity = 3 [default = ""]; } message CreateLoginURLResponse { required string login_url = 1; } message CreateLogoutURLRequest { required string destination_url = 1; optional string auth_domain = 2; } message CreateLogoutURLResponse { required string logout_url = 1; } message GetOAuthUserRequest { optional string scope = 1; repeated string scopes = 2; } message GetOAuthUserResponse { required string email = 1; required string user_id = 2; required string auth_domain = 3; optional string user_organization = 4 [default = ""]; optional bool is_admin = 5 [default = false]; optional string client_id = 6 [default = ""]; repeated string scopes = 7; } message CheckOAuthSignatureRequest { } message CheckOAuthSignatureResponse { required string oauth_consumer_key = 1; } ================================================ FILE: vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto // DO NOT EDIT! /* Package xmpp is a generated protocol buffer package. It is generated from these files: google.golang.org/appengine/internal/xmpp/xmpp_service.proto It has these top-level messages: XmppServiceError PresenceRequest PresenceResponse BulkPresenceRequest BulkPresenceResponse XmppMessageRequest XmppMessageResponse XmppSendPresenceRequest XmppSendPresenceResponse XmppInviteRequest XmppInviteResponse */ package xmpp import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
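The user service messages above are normally driven by the google.golang.org/appengine/user package. As a hedged illustration (the service and method names are assumptions based on that package, not confirmed by this repository), a login URL can be requested like this:

package usersketch

import (
	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	"google.golang.org/appengine/internal"
	pb "google.golang.org/appengine/internal/user"
)

// loginURL asks the user service for a sign-in URL that redirects back to dest.
func loginURL(c context.Context, dest string) (string, error) {
	req := &pb.CreateLoginURLRequest{DestinationUrl: proto.String(dest)}
	res := &pb.CreateLoginURLResponse{}
	if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil {
		return "", err
	}
	return res.GetLoginUrl(), nil
}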
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type XmppServiceError_ErrorCode int32 const ( XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1 XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2 XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3 XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4 XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5 XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6 XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7 XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8 XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9 ) var XmppServiceError_ErrorCode_name = map[int32]string{ 1: "UNSPECIFIED_ERROR", 2: "INVALID_JID", 3: "NO_BODY", 4: "INVALID_XML", 5: "INVALID_TYPE", 6: "INVALID_SHOW", 7: "EXCEEDED_MAX_SIZE", 8: "APPID_ALIAS_REQUIRED", 9: "NONDEFAULT_MODULE", } var XmppServiceError_ErrorCode_value = map[string]int32{ "UNSPECIFIED_ERROR": 1, "INVALID_JID": 2, "NO_BODY": 3, "INVALID_XML": 4, "INVALID_TYPE": 5, "INVALID_SHOW": 6, "EXCEEDED_MAX_SIZE": 7, "APPID_ALIAS_REQUIRED": 8, "NONDEFAULT_MODULE": 9, } func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode { p := new(XmppServiceError_ErrorCode) *p = x return p } func (x XmppServiceError_ErrorCode) String() string { return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x)) } func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode") if err != nil { return err } *x = XmppServiceError_ErrorCode(value) return nil } type PresenceResponse_SHOW int32 const ( PresenceResponse_NORMAL PresenceResponse_SHOW = 0 PresenceResponse_AWAY PresenceResponse_SHOW = 1 PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2 PresenceResponse_CHAT PresenceResponse_SHOW = 3 PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4 ) var PresenceResponse_SHOW_name = map[int32]string{ 0: "NORMAL", 1: "AWAY", 2: "DO_NOT_DISTURB", 3: "CHAT", 4: "EXTENDED_AWAY", } var PresenceResponse_SHOW_value = map[string]int32{ "NORMAL": 0, "AWAY": 1, "DO_NOT_DISTURB": 2, "CHAT": 3, "EXTENDED_AWAY": 4, } func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW { p := new(PresenceResponse_SHOW) *p = x return p } func (x PresenceResponse_SHOW) String() string { return proto.EnumName(PresenceResponse_SHOW_name, int32(x)) } func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW") if err != nil { return err } *x = PresenceResponse_SHOW(value) return nil } type XmppMessageResponse_XmppMessageStatus int32 const ( XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0 XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1 XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2 ) var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{ 0: "NO_ERROR", 1: "INVALID_JID", 2: "OTHER_ERROR", } var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{ "NO_ERROR": 0, "INVALID_JID": 1, "OTHER_ERROR": 2, } func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus { p := new(XmppMessageResponse_XmppMessageStatus) *p = x return p } func (x XmppMessageResponse_XmppMessageStatus) String() string { return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x)) } func (x 
*XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus") if err != nil { return err } *x = XmppMessageResponse_XmppMessageStatus(value) return nil } type XmppServiceError struct { XXX_unrecognized []byte `json:"-"` } func (m *XmppServiceError) Reset() { *m = XmppServiceError{} } func (m *XmppServiceError) String() string { return proto.CompactTextString(m) } func (*XmppServiceError) ProtoMessage() {} type PresenceRequest struct { Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PresenceRequest) Reset() { *m = PresenceRequest{} } func (m *PresenceRequest) String() string { return proto.CompactTextString(m) } func (*PresenceRequest) ProtoMessage() {} func (m *PresenceRequest) GetJid() string { if m != nil && m.Jid != nil { return *m.Jid } return "" } func (m *PresenceRequest) GetFromJid() string { if m != nil && m.FromJid != nil { return *m.FromJid } return "" } type PresenceResponse struct { IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"` Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"` Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PresenceResponse) Reset() { *m = PresenceResponse{} } func (m *PresenceResponse) String() string { return proto.CompactTextString(m) } func (*PresenceResponse) ProtoMessage() {} func (m *PresenceResponse) GetIsAvailable() bool { if m != nil && m.IsAvailable != nil { return *m.IsAvailable } return false } func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW { if m != nil && m.Presence != nil { return *m.Presence } return PresenceResponse_NORMAL } func (m *PresenceResponse) GetValid() bool { if m != nil && m.Valid != nil { return *m.Valid } return false } type BulkPresenceRequest struct { Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"` FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BulkPresenceRequest) Reset() { *m = BulkPresenceRequest{} } func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) } func (*BulkPresenceRequest) ProtoMessage() {} func (m *BulkPresenceRequest) GetJid() []string { if m != nil { return m.Jid } return nil } func (m *BulkPresenceRequest) GetFromJid() string { if m != nil && m.FromJid != nil { return *m.FromJid } return "" } type BulkPresenceResponse struct { PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} } func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) } func (*BulkPresenceResponse) ProtoMessage() {} func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse { if m != nil { return m.PresenceResponse } return nil } type XmppMessageRequest struct { Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"` Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"` RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"` Type *string 
`protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"` FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} } func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) } func (*XmppMessageRequest) ProtoMessage() {} const Default_XmppMessageRequest_RawXml bool = false const Default_XmppMessageRequest_Type string = "chat" func (m *XmppMessageRequest) GetJid() []string { if m != nil { return m.Jid } return nil } func (m *XmppMessageRequest) GetBody() string { if m != nil && m.Body != nil { return *m.Body } return "" } func (m *XmppMessageRequest) GetRawXml() bool { if m != nil && m.RawXml != nil { return *m.RawXml } return Default_XmppMessageRequest_RawXml } func (m *XmppMessageRequest) GetType() string { if m != nil && m.Type != nil { return *m.Type } return Default_XmppMessageRequest_Type } func (m *XmppMessageRequest) GetFromJid() string { if m != nil && m.FromJid != nil { return *m.FromJid } return "" } type XmppMessageResponse struct { Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} } func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) } func (*XmppMessageResponse) ProtoMessage() {} func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus { if m != nil { return m.Status } return nil } type XmppSendPresenceRequest struct { Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"` Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} } func (m *XmppSendPresenceRequest) String() string { return proto.CompactTextString(m) } func (*XmppSendPresenceRequest) ProtoMessage() {} func (m *XmppSendPresenceRequest) GetJid() string { if m != nil && m.Jid != nil { return *m.Jid } return "" } func (m *XmppSendPresenceRequest) GetType() string { if m != nil && m.Type != nil { return *m.Type } return "" } func (m *XmppSendPresenceRequest) GetShow() string { if m != nil && m.Show != nil { return *m.Show } return "" } func (m *XmppSendPresenceRequest) GetStatus() string { if m != nil && m.Status != nil { return *m.Status } return "" } func (m *XmppSendPresenceRequest) GetFromJid() string { if m != nil && m.FromJid != nil { return *m.FromJid } return "" } type XmppSendPresenceResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} } func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) } func (*XmppSendPresenceResponse) ProtoMessage() {} type XmppInviteRequest struct { Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} } func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) } func 
(*XmppInviteRequest) ProtoMessage() {} func (m *XmppInviteRequest) GetJid() string { if m != nil && m.Jid != nil { return *m.Jid } return "" } func (m *XmppInviteRequest) GetFromJid() string { if m != nil && m.FromJid != nil { return *m.FromJid } return "" } type XmppInviteResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} } func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) } func (*XmppInviteResponse) ProtoMessage() {} func init() { } ================================================ FILE: vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto ================================================ syntax = "proto2"; option go_package = "xmpp"; package appengine; message XmppServiceError { enum ErrorCode { UNSPECIFIED_ERROR = 1; INVALID_JID = 2; NO_BODY = 3; INVALID_XML = 4; INVALID_TYPE = 5; INVALID_SHOW = 6; EXCEEDED_MAX_SIZE = 7; APPID_ALIAS_REQUIRED = 8; NONDEFAULT_MODULE = 9; } } message PresenceRequest { required string jid = 1; optional string from_jid = 2; } message PresenceResponse { enum SHOW { NORMAL = 0; AWAY = 1; DO_NOT_DISTURB = 2; CHAT = 3; EXTENDED_AWAY = 4; } required bool is_available = 1; optional SHOW presence = 2; optional bool valid = 3; } message BulkPresenceRequest { repeated string jid = 1; optional string from_jid = 2; } message BulkPresenceResponse { repeated PresenceResponse presence_response = 1; } message XmppMessageRequest { repeated string jid = 1; required string body = 2; optional bool raw_xml = 3 [ default = false ]; optional string type = 4 [ default = "chat" ]; optional string from_jid = 5; } message XmppMessageResponse { enum XmppMessageStatus { NO_ERROR = 0; INVALID_JID = 1; OTHER_ERROR = 2; } repeated XmppMessageStatus status = 1; } message XmppSendPresenceRequest { required string jid = 1; optional string type = 2; optional string show = 3; optional string status = 4; optional string from_jid = 5; } message XmppSendPresenceResponse { } message XmppInviteRequest { required string jid = 1; optional string from_jid = 2; } message XmppInviteResponse { } ================================================ FILE: vendor/google.golang.org/appengine/log/api.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package log // This file implements the logging API. import ( "golang.org/x/net/context" "google.golang.org/appengine/internal" ) // Debugf formats its arguments according to the format, analogous to fmt.Printf, // and records the text as a log message at Debug level. The message will be associated // with the request linked with the provided context. func Debugf(ctx context.Context, format string, args ...interface{}) { internal.Logf(ctx, 0, format, args...) } // Infof is like Debugf, but at Info level. func Infof(ctx context.Context, format string, args ...interface{}) { internal.Logf(ctx, 1, format, args...) } // Warningf is like Debugf, but at Warning level. func Warningf(ctx context.Context, format string, args ...interface{}) { internal.Logf(ctx, 2, format, args...) } // Errorf is like Debugf, but at Error level. func Errorf(ctx context.Context, format string, args ...interface{}) { internal.Logf(ctx, 3, format, args...) } // Criticalf is like Debugf, but at Critical level. func Criticalf(ctx context.Context, format string, args ...interface{}) { internal.Logf(ctx, 4, format, args...) 
} ================================================ FILE: vendor/google.golang.org/appengine/log/log.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package log provides the means of querying an application's logs from within an App Engine application. Example: c := appengine.NewContext(r) query := &log.Query{ AppLogs: true, Versions: []string{"1"}, } for results := query.Run(c); ; { record, err := results.Next() if err == log.Done { log.Infof(c, "Done processing results") break } if err != nil { log.Errorf(c, "Failed to retrieve next log: %v", err) break } log.Infof(c, "Saw record %v", record) } */ package log import ( "errors" "fmt" "strings" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/log" ) // Query defines a logs query. type Query struct { // Start time specifies the earliest log to return (inclusive). StartTime time.Time // End time specifies the latest log to return (exclusive). EndTime time.Time // Offset specifies a position within the log stream to resume reading from, // and should come from a previously returned Record's field of the same name. Offset []byte // Incomplete controls whether active (incomplete) requests should be included. Incomplete bool // AppLogs indicates if application-level logs should be included. AppLogs bool // ApplyMinLevel indicates if MinLevel should be used to filter results. ApplyMinLevel bool // If ApplyMinLevel is true, only logs for requests with at least one // application log of MinLevel or higher will be returned. MinLevel int // Versions is the major version IDs whose logs should be retrieved. // Logs for specific modules can be retrieved by the specifying versions // in the form "module:version"; the default module is used if no module // is specified. Versions []string // A list of requests to search for instead of a time-based scan. Cannot be // combined with filtering options such as StartTime, EndTime, Offset, // Incomplete, ApplyMinLevel, or Versions. RequestIDs []string } // AppLog represents a single application-level log. type AppLog struct { Time time.Time Level int Message string } // Record contains all the information for a single web request. type Record struct { AppID string ModuleID string VersionID string RequestID []byte IP string Nickname string AppEngineRelease string // The time when this request started. StartTime time.Time // The time when this request finished. EndTime time.Time // Opaque cursor into the result stream. Offset []byte // The time required to process the request. Latency time.Duration MCycles int64 Method string Resource string HTTPVersion string Status int32 // The size of the request sent back to the client, in bytes. ResponseSize int64 Referrer string UserAgent string URLMapEntry string Combined string Host string // The estimated cost of this request, in dollars. Cost float64 TaskQueueName string TaskName string WasLoadingRequest bool PendingTime time.Duration Finished bool AppLogs []AppLog // Mostly-unique identifier for the instance that handled the request if available. InstanceID string } // Result represents the result of a query. 
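The package example above sets only AppLogs and Versions; the Query fields also support level filtering and the "module:version" form handled by makeRequest further down. A hedged variant, assuming the usual appengine, log, and time imports:

c := appengine.NewContext(r)
query := &log.Query{
	StartTime:     time.Now().Add(-1 * time.Hour),
	AppLogs:       true,
	ApplyMinLevel: true,
	MinLevel:      2,                       // same numbering as the Logf levels in api.go (2 = Warning)
	Versions:      []string{"default:1"},   // "module:version"; a bare "1" means the default module
}
for results := query.Run(c); ; {
	record, err := results.Next()
	if err == log.Done {
		break
	}
	if err != nil {
		log.Errorf(c, "log query failed: %v", err)
		break
	}
	log.Infof(c, "%s %s -> %d", record.Method, record.Resource, record.Status)
}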
type Result struct { logs []*Record context context.Context request *pb.LogReadRequest resultsSeen bool err error } // Next returns the next log record, or Done if there are no more results. func (qr *Result) Next() (*Record, error) { if qr.err != nil { return nil, qr.err } if len(qr.logs) > 0 { lr := qr.logs[0] qr.logs = qr.logs[1:] return lr, nil } if qr.request.Offset == nil && qr.resultsSeen { return nil, Done } if err := qr.run(); err != nil { // Errors here may be retried, so don't store the error. return nil, err } return qr.Next() } // Done is returned when a query iteration has completed. var Done = errors.New("log: query has no more results") // protoToAppLogs takes as input an array of pointers to LogLines, the internal // Protocol Buffer representation of a single application-level log, // and converts it to an array of AppLogs, the external representation // of an application-level log. func protoToAppLogs(logLines []*pb.LogLine) []AppLog { appLogs := make([]AppLog, len(logLines)) for i, line := range logLines { appLogs[i] = AppLog{ Time: time.Unix(0, *line.Time*1e3), Level: int(*line.Level), Message: *line.LogMessage, } } return appLogs } // protoToRecord converts a RequestLog, the internal Protocol Buffer // representation of a single request-level log, to a Record, its // corresponding external representation. func protoToRecord(rl *pb.RequestLog) *Record { offset, err := proto.Marshal(rl.Offset) if err != nil { offset = nil } return &Record{ AppID: *rl.AppId, ModuleID: rl.GetModuleId(), VersionID: *rl.VersionId, RequestID: rl.RequestId, Offset: offset, IP: *rl.Ip, Nickname: rl.GetNickname(), AppEngineRelease: string(rl.GetAppEngineRelease()), StartTime: time.Unix(0, *rl.StartTime*1e3), EndTime: time.Unix(0, *rl.EndTime*1e3), Latency: time.Duration(*rl.Latency) * time.Microsecond, MCycles: *rl.Mcycles, Method: *rl.Method, Resource: *rl.Resource, HTTPVersion: *rl.HttpVersion, Status: *rl.Status, ResponseSize: *rl.ResponseSize, Referrer: rl.GetReferrer(), UserAgent: rl.GetUserAgent(), URLMapEntry: *rl.UrlMapEntry, Combined: *rl.Combined, Host: rl.GetHost(), Cost: rl.GetCost(), TaskQueueName: rl.GetTaskQueueName(), TaskName: rl.GetTaskName(), WasLoadingRequest: rl.GetWasLoadingRequest(), PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond, Finished: rl.GetFinished(), AppLogs: protoToAppLogs(rl.Line), InstanceID: string(rl.GetCloneKey()), } } // Run starts a query for log records, which contain request and application // level log information. func (params *Query) Run(c context.Context) *Result { req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c)) return &Result{ context: c, request: req, err: err, } } func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) { req := &pb.LogReadRequest{} req.AppId = &appID if !params.StartTime.IsZero() { req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3) } if !params.EndTime.IsZero() { req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3) } if len(params.Offset) > 0 { var offset pb.LogOffset if err := proto.Unmarshal(params.Offset, &offset); err != nil { return nil, fmt.Errorf("bad Offset: %v", err) } req.Offset = &offset } if params.Incomplete { req.IncludeIncomplete = &params.Incomplete } if params.AppLogs { req.IncludeAppLogs = &params.AppLogs } if params.ApplyMinLevel { req.MinimumLogLevel = proto.Int32(int32(params.MinLevel)) } if params.Versions == nil { // If no versions were specified, default to the default module at // the major version being used by this module.
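// For example, a full version ID of the form "1.401234567890" (the "X.Y" shape
// returned by appengine.VersionID) is truncated here to its major part "1".
// (Illustrative note added for clarity; not from the upstream file.)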
if i := strings.Index(versionID, "."); i >= 0 { versionID = versionID[:i] } req.VersionId = []string{versionID} } else { req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions)) for _, v := range params.Versions { var m *string if i := strings.Index(v, ":"); i >= 0 { m, v = proto.String(v[:i]), v[i+1:] } req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{ ModuleId: m, VersionId: proto.String(v), }) } } if params.RequestIDs != nil { ids := make([][]byte, len(params.RequestIDs)) for i, v := range params.RequestIDs { ids[i] = []byte(v) } req.RequestId = ids } return req, nil } // run takes the query Result produced by a call to Run and updates it with // more Records. The updated Result contains a new set of logs as well as an // offset to where more logs can be found. We also convert the items in the // response from their internal representations to external versions of the // same structs. func (r *Result) run() error { res := &pb.LogReadResponse{} if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil { return err } r.logs = make([]*Record, len(res.Log)) r.request.Offset = res.Offset r.resultsSeen = true for i, log := range res.Log { r.logs[i] = protoToRecord(log) } return nil } func init() { internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/mail/mail.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package mail provides the means of sending email from an App Engine application. Example: msg := &mail.Message{ Sender: "romeo@montague.com", To: []string{"Juliet <juliet@capulet.org>"}, Subject: "See you tonight", Body: "Don't forget our plans. Hark, 'til later.", } if err := mail.Send(c, msg); err != nil { log.Errorf(c, "Alas, my user, the email failed to sendeth: %v", err) } */ package mail import ( "net/mail" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" bpb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/mail" ) // A Message represents an email message. // Addresses may be of any form permitted by RFC 822. type Message struct { // Sender must be set, and must be either an application admin // or the currently signed-in user. Sender string ReplyTo string // may be empty // At least one of these slices must have a non-zero length, // except when calling SendToAdmins. To, Cc, Bcc []string Subject string // At least one of Body or HTMLBody must be non-empty. Body string HTMLBody string Attachments []Attachment // Extra mail headers. // See https://cloud.google.com/appengine/docs/go/mail/ // for permissible headers. Headers mail.Header } // An Attachment represents an email attachment. type Attachment struct { // Name must be set to a valid file name. Name string Data []byte ContentID string } // Send sends an email message. func Send(c context.Context, msg *Message) error { return send(c, "Send", msg) } // SendToAdmins sends an email message to the application's administrators.
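Beyond the package example above, Message also carries attachments (and extra headers via net/mail.Header, mapped to MailHeader entries by send below). A hedged sketch, assuming the same context variable and imports as the package example plus the log package; names and data are placeholders:

msg := &mail.Message{
	Sender:  "admin@example.com", // must be an app admin or the signed-in user
	To:      []string{"user@example.com"},
	Subject: "Monthly report",
	Body:    "The report is attached.",
	Attachments: []mail.Attachment{{
		Name: "report.csv",
		Data: []byte("date,count\n2015-01-01,42\n"),
	}},
}
if err := mail.Send(c, msg); err != nil {
	log.Errorf(c, "sending report failed: %v", err)
}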
func SendToAdmins(c context.Context, msg *Message) error { return send(c, "SendToAdmins", msg) } func send(c context.Context, method string, msg *Message) error { req := &pb.MailMessage{ Sender: &msg.Sender, To: msg.To, Cc: msg.Cc, Bcc: msg.Bcc, Subject: &msg.Subject, } if msg.ReplyTo != "" { req.ReplyTo = &msg.ReplyTo } if msg.Body != "" { req.TextBody = &msg.Body } if msg.HTMLBody != "" { req.HtmlBody = &msg.HTMLBody } if len(msg.Attachments) > 0 { req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments)) for i, att := range msg.Attachments { req.Attachment[i] = &pb.MailAttachment{ FileName: proto.String(att.Name), Data: att.Data, } if att.ContentID != "" { req.Attachment[i].ContentID = proto.String(att.ContentID) } } } for key, vs := range msg.Headers { for _, v := range vs { req.Header = append(req.Header, &pb.MailHeader{ Name: proto.String(key), Value: proto.String(v), }) } } res := &bpb.VoidProto{} if err := internal.Call(c, "mail", method, req, res); err != nil { return err } return nil } func init() { internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/memcache/memcache.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package memcache provides a client for App Engine's distributed in-memory // key-value store for small chunks of arbitrary data. // // The fundamental operations get and set items, keyed by a string. // // item0, err := memcache.Get(c, "key") // if err != nil && err != memcache.ErrCacheMiss { // return err // } // if err == nil { // fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value) // } else { // fmt.Fprintf(w, "memcache miss\n") // } // // and // // item1 := &memcache.Item{ // Key: "foo", // Value: []byte("bar"), // } // if err := memcache.Set(c, item1); err != nil { // return err // } package memcache import ( "bytes" "encoding/gob" "encoding/json" "errors" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/memcache" ) var ( // ErrCacheMiss means that an operation failed // because the item wasn't present. ErrCacheMiss = errors.New("memcache: cache miss") // ErrCASConflict means that a CompareAndSwap call failed due to the // cached value being modified between the Get and the CompareAndSwap. // If the cached value was simply evicted rather than replaced, // ErrNotStored will be returned instead. ErrCASConflict = errors.New("memcache: compare-and-swap conflict") // ErrNoStats means that no statistics were available. ErrNoStats = errors.New("memcache: no statistics available") // ErrNotStored means that a conditional write operation (i.e. Add or // CompareAndSwap) failed because the condition was not satisfied. ErrNotStored = errors.New("memcache: item not stored") // ErrServerError means that a server error occurred. ErrServerError = errors.New("memcache: server error") ) // Item is the unit of memcache gets and sets. type Item struct { // Key is the Item's key (250 bytes maximum). Key string // Value is the Item's value. Value []byte // Object is the Item's value for use with a Codec. Object interface{} // Flags are server-opaque flags whose semantics are entirely up to the // App Engine app. 
Flags uint32 // Expiration is the maximum duration that the item will stay // in the cache. // The zero value means the Item has no expiration time. // Subsecond precision is ignored. // This is not set when getting items. Expiration time.Duration // casID is a client-opaque value used for compare-and-swap operations. // Zero means that compare-and-swap is not used. casID uint64 } const ( secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code thirtyYears = time.Duration(secondsIn30Years) * time.Second ) // protoToItem converts a protocol buffer item to a Go struct. func protoToItem(p *pb.MemcacheGetResponse_Item) *Item { return &Item{ Key: string(p.Key), Value: p.Value, Flags: p.GetFlags(), casID: p.GetCasId(), } } // If err is an appengine.MultiError, return its first element. Otherwise, return err. func singleError(err error) error { if me, ok := err.(appengine.MultiError); ok { return me[0] } return err } // Get gets the item for the given key. ErrCacheMiss is returned for a memcache // cache miss. The key must be at most 250 bytes in length. func Get(c context.Context, key string) (*Item, error) { m, err := GetMulti(c, []string{key}) if err != nil { return nil, err } if _, ok := m[key]; !ok { return nil, ErrCacheMiss } return m[key], nil } // GetMulti is a batch version of Get. The returned map from keys to items may // have fewer elements than the input slice, due to memcache cache misses. // Each key must be at most 250 bytes in length. func GetMulti(c context.Context, key []string) (map[string]*Item, error) { if len(key) == 0 { return nil, nil } keyAsBytes := make([][]byte, len(key)) for i, k := range key { keyAsBytes[i] = []byte(k) } req := &pb.MemcacheGetRequest{ Key: keyAsBytes, ForCas: proto.Bool(true), } res := &pb.MemcacheGetResponse{} if err := internal.Call(c, "memcache", "Get", req, res); err != nil { return nil, err } m := make(map[string]*Item, len(res.Item)) for _, p := range res.Item { t := protoToItem(p) m[t.Key] = t } return m, nil } // Delete deletes the item for the given key. // ErrCacheMiss is returned if the specified item can not be found. // The key must be at most 250 bytes in length. func Delete(c context.Context, key string) error { return singleError(DeleteMulti(c, []string{key})) } // DeleteMulti is a batch version of Delete. // If any keys cannot be found, an appengine.MultiError is returned. // Each key must be at most 250 bytes in length. func DeleteMulti(c context.Context, key []string) error { if len(key) == 0 { return nil } req := &pb.MemcacheDeleteRequest{ Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)), } for i, k := range key { req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)} } res := &pb.MemcacheDeleteResponse{} if err := internal.Call(c, "memcache", "Delete", req, res); err != nil { return err } if len(res.DeleteStatus) != len(key) { return ErrServerError } me, any := make(appengine.MultiError, len(key)), false for i, s := range res.DeleteStatus { switch s { case pb.MemcacheDeleteResponse_DELETED: // OK case pb.MemcacheDeleteResponse_NOT_FOUND: me[i] = ErrCacheMiss any = true default: me[i] = ErrServerError any = true } } if any { return me } return nil } // Increment atomically increments the decimal value in the given key // by delta and returns the new value. The value must fit in a uint64. // Overflow wraps around, and underflow is capped to zero. The // provided delta may be negative. 
If the key doesn't exist in // memcache, the provided initial value is used to atomically // populate it before the delta is applied. // The key must be at most 250 bytes in length. func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) { return incr(c, key, delta, &initialValue) } // IncrementExisting works like Increment but assumes that the key // already exists in memcache and doesn't take an initial value. // IncrementExisting can save work if calculating the initial value is // expensive. // An error is returned if the specified item can not be found. func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) { return incr(c, key, delta, nil) } func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) { req := &pb.MemcacheIncrementRequest{ Key: []byte(key), InitialValue: initialValue, } if delta >= 0 { req.Delta = proto.Uint64(uint64(delta)) } else { req.Delta = proto.Uint64(uint64(-delta)) req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum() } res := &pb.MemcacheIncrementResponse{} err = internal.Call(c, "memcache", "Increment", req, res) if err != nil { return } if res.NewValue == nil { return 0, ErrCacheMiss } return *res.NewValue, nil } // set sets the given items using the given conflict resolution policy. // appengine.MultiError may be returned. func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error { if len(item) == 0 { return nil } req := &pb.MemcacheSetRequest{ Item: make([]*pb.MemcacheSetRequest_Item, len(item)), } for i, t := range item { p := &pb.MemcacheSetRequest_Item{ Key: []byte(t.Key), } if value == nil { p.Value = t.Value } else { p.Value = value[i] } if t.Flags != 0 { p.Flags = proto.Uint32(t.Flags) } if t.Expiration != 0 { // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned) // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed). // Throughout this .go file, we use int32. // Also, in the proto, the expiration value is either a duration (in seconds) // or an absolute Unix timestamp (in seconds), depending on whether the // value is less than or greater than or equal to 30 years, respectively. if t.Expiration < time.Second { // Because an Expiration of 0 means no expiration, we take // care here to translate an item with an expiration // Duration between 0-1 seconds as immediately expiring // (saying it expired a few seconds ago), rather than // rounding it down to 0 and making it live forever. 
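// Illustrative values for the three branches below (note added for clarity,
// not from the upstream file): an Expiration of 500*time.Millisecond is sent
// as an absolute time five seconds in the past (expire immediately);
// 10*time.Minute is sent as the relative value 600; anything >= thirtyYears
// is sent as an absolute Unix timestamp.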
p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5) } else if t.Expiration >= thirtyYears { p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second)) } else { p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second)) } } if t.casID != 0 { p.CasId = proto.Uint64(t.casID) p.ForCas = proto.Bool(true) } p.SetPolicy = policy.Enum() req.Item[i] = p } res := &pb.MemcacheSetResponse{} if err := internal.Call(c, "memcache", "Set", req, res); err != nil { return err } if len(res.SetStatus) != len(item) { return ErrServerError } me, any := make(appengine.MultiError, len(item)), false for i, st := range res.SetStatus { var err error switch st { case pb.MemcacheSetResponse_STORED: // OK case pb.MemcacheSetResponse_NOT_STORED: err = ErrNotStored case pb.MemcacheSetResponse_EXISTS: err = ErrCASConflict default: err = ErrServerError } if err != nil { me[i] = err any = true } } if any { return me } return nil } // Set writes the given item, unconditionally. func Set(c context.Context, item *Item) error { return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET)) } // SetMulti is a batch version of Set. // appengine.MultiError may be returned. func SetMulti(c context.Context, item []*Item) error { return set(c, item, nil, pb.MemcacheSetRequest_SET) } // Add writes the given item, if no value already exists for its key. // ErrNotStored is returned if that condition is not met. func Add(c context.Context, item *Item) error { return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD)) } // AddMulti is a batch version of Add. // appengine.MultiError may be returned. func AddMulti(c context.Context, item []*Item) error { return set(c, item, nil, pb.MemcacheSetRequest_ADD) } // CompareAndSwap writes the given item that was previously returned by Get, // if the value was neither modified or evicted between the Get and the // CompareAndSwap calls. The item's Key should not change between calls but // all other item fields may differ. // ErrCASConflict is returned if the value was modified in between the calls. // ErrNotStored is returned if the value was evicted in between the calls. func CompareAndSwap(c context.Context, item *Item) error { return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS)) } // CompareAndSwapMulti is a batch version of CompareAndSwap. // appengine.MultiError may be returned. func CompareAndSwapMulti(c context.Context, item []*Item) error { return set(c, item, nil, pb.MemcacheSetRequest_CAS) } // Codec represents a symmetric pair of functions that implement a codec. // Items stored into or retrieved from memcache using a Codec have their // values marshaled or unmarshaled. // // All the methods provided for Codec behave analogously to the package level // function with same name. type Codec struct { Marshal func(interface{}) ([]byte, error) Unmarshal func([]byte, interface{}) error } // Get gets the item for the given key and decodes the obtained value into v. // ErrCacheMiss is returned for a memcache cache miss. // The key must be at most 250 bytes in length. 
func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) { i, err := Get(c, key) if err != nil { return nil, err } if err := cd.Unmarshal(i.Value, v); err != nil { return nil, err } return i, nil } func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error { var vs [][]byte var me appengine.MultiError for i, item := range items { v, err := cd.Marshal(item.Object) if err != nil { if me == nil { me = make(appengine.MultiError, len(items)) } me[i] = err continue } if me == nil { vs = append(vs, v) } } if me != nil { return me } return set(c, items, vs, policy) } // Set writes the given item, unconditionally. func (cd Codec) Set(c context.Context, item *Item) error { return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET)) } // SetMulti is a batch version of Set. // appengine.MultiError may be returned. func (cd Codec) SetMulti(c context.Context, items []*Item) error { return cd.set(c, items, pb.MemcacheSetRequest_SET) } // Add writes the given item, if no value already exists for its key. // ErrNotStored is returned if that condition is not met. func (cd Codec) Add(c context.Context, item *Item) error { return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD)) } // AddMulti is a batch version of Add. // appengine.MultiError may be returned. func (cd Codec) AddMulti(c context.Context, items []*Item) error { return cd.set(c, items, pb.MemcacheSetRequest_ADD) } // CompareAndSwap writes the given item that was previously returned by Get, // if the value was neither modified or evicted between the Get and the // CompareAndSwap calls. The item's Key should not change between calls but // all other item fields may differ. // ErrCASConflict is returned if the value was modified in between the calls. // ErrNotStored is returned if the value was evicted in between the calls. func (cd Codec) CompareAndSwap(c context.Context, item *Item) error { return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS)) } // CompareAndSwapMulti is a batch version of CompareAndSwap. // appengine.MultiError may be returned. func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error { return cd.set(c, items, pb.MemcacheSetRequest_CAS) } var ( // Gob is a Codec that uses the gob package. Gob = Codec{gobMarshal, gobUnmarshal} // JSON is a Codec that uses the json package. JSON = Codec{json.Marshal, json.Unmarshal} ) func gobMarshal(v interface{}) ([]byte, error) { var buf bytes.Buffer if err := gob.NewEncoder(&buf).Encode(v); err != nil { return nil, err } return buf.Bytes(), nil } func gobUnmarshal(data []byte, v interface{}) error { return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v) } // Statistics represents a set of statistics about the memcache cache. // This may include items that have expired but have not yet been removed from the cache. type Statistics struct { Hits uint64 // Counter of cache hits Misses uint64 // Counter of cache misses ByteHits uint64 // Counter of bytes transferred for gets Items uint64 // Items currently in the cache Bytes uint64 // Size of all items currently in the cache Oldest int64 // Age of access of the oldest item, in seconds } // Stats retrieves the current memcache statistics. 
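// A short sketch of reading these statistics from a handler (the log output
// format is illustrative):
//
//	import (
//		"net/http"
//
//		"google.golang.org/appengine"
//		"google.golang.org/appengine/log"
//		"google.golang.org/appengine/memcache"
//	)
//
//	func cacheStats(w http.ResponseWriter, r *http.Request) {
//		ctx := appengine.NewContext(r)
//		s, err := memcache.Stats(ctx)
//		if err != nil {
//			log.Errorf(ctx, "memcache.Stats: %v", err) // may be ErrNoStats
//			return
//		}
//		log.Infof(ctx, "hits=%d misses=%d items=%d bytes=%d", s.Hits, s.Misses, s.Items, s.Bytes)
//	}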
func Stats(c context.Context) (*Statistics, error) { req := &pb.MemcacheStatsRequest{} res := &pb.MemcacheStatsResponse{} if err := internal.Call(c, "memcache", "Stats", req, res); err != nil { return nil, err } if res.Stats == nil { return nil, ErrNoStats } return &Statistics{ Hits: *res.Stats.Hits, Misses: *res.Stats.Misses, ByteHits: *res.Stats.ByteHits, Items: *res.Stats.Items, Bytes: *res.Stats.Bytes, Oldest: int64(*res.Stats.OldestItemAge), }, nil } // Flush flushes all items from memcache. func Flush(c context.Context) error { req := &pb.MemcacheFlushRequest{} res := &pb.MemcacheFlushResponse{} return internal.Call(c, "memcache", "FlushAll", req, res) } func namespaceMod(m proto.Message, namespace string) { switch m := m.(type) { case *pb.MemcacheDeleteRequest: if m.NameSpace == nil { m.NameSpace = &namespace } case *pb.MemcacheGetRequest: if m.NameSpace == nil { m.NameSpace = &namespace } case *pb.MemcacheIncrementRequest: if m.NameSpace == nil { m.NameSpace = &namespace } case *pb.MemcacheSetRequest: if m.NameSpace == nil { m.NameSpace = &namespace } // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace. } } func init() { internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name) internal.NamespaceMods["memcache"] = namespaceMod } ================================================ FILE: vendor/google.golang.org/appengine/module/module.go ================================================ // Copyright 2013 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package module provides functions for interacting with modules. The appengine package contains functions that report the identity of the app, including the module name. */ package module import ( "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/modules" ) // List returns the names of modules belonging to this application. func List(c context.Context) ([]string, error) { req := &pb.GetModulesRequest{} res := &pb.GetModulesResponse{} err := internal.Call(c, "modules", "GetModules", req, res) return res.Module, err } // NumInstances returns the number of instances of the given module/version. // If either argument is the empty string it means the default. func NumInstances(c context.Context, module, version string) (int, error) { req := &pb.GetNumInstancesRequest{} if module != "" { req.Module = &module } if version != "" { req.Version = &version } res := &pb.GetNumInstancesResponse{} if err := internal.Call(c, "modules", "GetNumInstances", req, res); err != nil { return 0, err } return int(*res.Instances), nil } // SetNumInstances sets the number of instances of the given module.version to the // specified value. If either module or version are the empty string it means the // default. func SetNumInstances(c context.Context, module, version string, instances int) error { req := &pb.SetNumInstancesRequest{} if module != "" { req.Module = &module } if version != "" { req.Version = &version } req.Instances = proto.Int64(int64(instances)) res := &pb.SetNumInstancesResponse{} return internal.Call(c, "modules", "SetNumInstances", req, res) } // Versions returns the names of the versions that belong to the specified module. // If module is the empty string, it means the default module. 
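// A sketch that checks and raises a module's instance count with the
// functions above; the "worker" module name and the threshold are
// illustrative, and SetNumInstances only applies to manually scaled modules:
//
//	import (
//		"net/http"
//
//		"google.golang.org/appengine"
//		"google.golang.org/appengine/log"
//		"google.golang.org/appengine/module"
//	)
//
//	func scaleWorker(w http.ResponseWriter, r *http.Request) {
//		ctx := appengine.NewContext(r)
//		n, err := module.NumInstances(ctx, "worker", "") // "" selects the default version
//		if err != nil {
//			log.Errorf(ctx, "NumInstances: %v", err)
//			return
//		}
//		if n < 5 {
//			if err := module.SetNumInstances(ctx, "worker", "", 5); err != nil {
//				log.Errorf(ctx, "SetNumInstances: %v", err)
//			}
//		}
//	}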
func Versions(c context.Context, module string) ([]string, error) { req := &pb.GetVersionsRequest{} if module != "" { req.Module = &module } res := &pb.GetVersionsResponse{} err := internal.Call(c, "modules", "GetVersions", req, res) return res.GetVersion(), err } // DefaultVersion returns the default version of the specified module. // If module is the empty string, it means the default module. func DefaultVersion(c context.Context, module string) (string, error) { req := &pb.GetDefaultVersionRequest{} if module != "" { req.Module = &module } res := &pb.GetDefaultVersionResponse{} err := internal.Call(c, "modules", "GetDefaultVersion", req, res) return res.GetVersion(), err } // Start starts the specified version of the specified module. // If either module or version are the empty string, it means the default. func Start(c context.Context, module, version string) error { req := &pb.StartModuleRequest{} if module != "" { req.Module = &module } if version != "" { req.Version = &version } res := &pb.StartModuleResponse{} return internal.Call(c, "modules", "StartModule", req, res) } // Stop stops the specified version of the specified module. // If either module or version are the empty string, it means the default. func Stop(c context.Context, module, version string) error { req := &pb.StopModuleRequest{} if module != "" { req.Module = &module } if version != "" { req.Version = &version } res := &pb.StopModuleResponse{} return internal.Call(c, "modules", "StopModule", req, res) } ================================================ FILE: vendor/google.golang.org/appengine/namespace.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package appengine import ( "fmt" "regexp" "golang.org/x/net/context" "google.golang.org/appengine/internal" ) // Namespace returns a replacement context that operates within the given namespace. func Namespace(c context.Context, namespace string) (context.Context, error) { if !validNamespace.MatchString(namespace) { return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) } return internal.NamespacedContext(c, namespace), nil } // validNamespace matches valid namespace names. var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) ================================================ FILE: vendor/google.golang.org/appengine/remote_api/client.go ================================================ // Copyright 2013 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package remote_api // This file provides the client for connecting remotely to a user's production // application. import ( "bytes" "fmt" "io/ioutil" "log" "math/rand" "net/http" "net/url" "regexp" "strconv" "strings" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/remote_api" ) // NewRemoteContext returns a context that gives access to the production // APIs for the application at the given host. All communication will be // performed over SSL unless the host is localhost. func NewRemoteContext(host string, client *http.Client) (context.Context, error) { // Add an appcfg header to outgoing requests. 
t := client.Transport if t == nil { t = http.DefaultTransport } client.Transport = &headerAddingRoundTripper{t} url := url.URL{ Scheme: "https", Host: host, Path: "/_ah/remote_api", } if host == "localhost" || strings.HasPrefix(host, "localhost:") { url.Scheme = "http" } u := url.String() appID, err := getAppID(client, u) if err != nil { return nil, fmt.Errorf("unable to contact server: %v", err) } rc := &remoteContext{ client: client, url: u, } ctx := internal.WithCallOverride(context.Background(), rc.call) ctx = internal.WithLogOverride(ctx, rc.logf) ctx = internal.WithAppIDOverride(ctx, appID) return ctx, nil } type remoteContext struct { client *http.Client url string } var logLevels = map[int64]string{ 0: "DEBUG", 1: "INFO", 2: "WARNING", 3: "ERROR", 4: "CRITICAL", } func (c *remoteContext) logf(level int64, format string, args ...interface{}) { log.Printf(logLevels[level]+": "+format, args...) } func (c *remoteContext) call(ctx context.Context, service, method string, in, out proto.Message) error { req, err := proto.Marshal(in) if err != nil { return fmt.Errorf("error marshalling request: %v", err) } remReq := &pb.Request{ ServiceName: proto.String(service), Method: proto.String(method), Request: req, // NOTE(djd): RequestId is unused in the server. } req, err = proto.Marshal(remReq) if err != nil { return fmt.Errorf("proto.Marshal: %v", err) } // TODO(djd): Respect ctx.Deadline()? resp, err := c.client.Post(c.url, "application/octet-stream", bytes.NewReader(req)) if err != nil { return fmt.Errorf("error sending request: %v", err) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if resp.StatusCode != http.StatusOK { return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body) } if err != nil { return fmt.Errorf("failed reading response: %v", err) } remResp := &pb.Response{} if err := proto.Unmarshal(body, remResp); err != nil { return fmt.Errorf("error unmarshalling response: %v", err) } if ae := remResp.GetApplicationError(); ae != nil { return &internal.APIError{ Code: ae.GetCode(), Detail: ae.GetDetail(), Service: service, } } if remResp.Response == nil { return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp)) } return proto.Unmarshal(remResp.Response, out) } // This is a forgiving regexp designed to parse the app ID from YAML. var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`) func getAppID(client *http.Client, url string) (string, error) { // Generate a pseudo-random token for handshaking. token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int()) resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token)) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body) } if err != nil { return "", fmt.Errorf("failed reading response: %v", err) } // Check the token is present in response. 
if !bytes.Contains(body, []byte(token)) { return "", fmt.Errorf("token not found: want %q; body %q", token, body) } match := appIDRE.FindSubmatch(body) if match == nil { return "", fmt.Errorf("app ID not found: body %q", body) } return string(match[1]), nil } type headerAddingRoundTripper struct { Wrapped http.RoundTripper } func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { r.Header.Set("X-Appcfg-Api-Version", "1") return t.Wrapped.RoundTrip(r) } ================================================ FILE: vendor/google.golang.org/appengine/remote_api/remote_api.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package remote_api implements the /_ah/remote_api endpoint. This endpoint is used by offline tools such as the bulk loader. */ package remote_api import ( "fmt" "io" "io/ioutil" "net/http" "strconv" "github.com/golang/protobuf/proto" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/remote_api" "google.golang.org/appengine/log" "google.golang.org/appengine/user" ) func init() { http.HandleFunc("/_ah/remote_api", handle) } func handle(w http.ResponseWriter, req *http.Request) { c := appengine.NewContext(req) u := user.Current(c) if u == nil { u, _ = user.CurrentOAuth(c, "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/appengine.apis", ) } if u == nil || !u.Admin { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusUnauthorized) io.WriteString(w, "You must be logged in as an administrator to access this.\n") return } if req.Header.Get("X-Appcfg-Api-Version") == "" { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusForbidden) io.WriteString(w, "This request did not contain a necessary header.\n") return } if req.Method != "POST" { // Response must be YAML. rtok := req.FormValue("rtok") if rtok == "" { rtok = "0" } w.Header().Set("Content-Type", "text/yaml; charset=utf-8") fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok) return } defer req.Body.Close() body, err := ioutil.ReadAll(req.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) log.Errorf(c, "Failed reading body: %v", err) return } remReq := &pb.Request{} if err := proto.Unmarshal(body, remReq); err != nil { w.WriteHeader(http.StatusBadRequest) log.Errorf(c, "Bad body: %v", err) return } service, method := *remReq.ServiceName, *remReq.Method if !requestSupported(service, method) { w.WriteHeader(http.StatusBadRequest) log.Errorf(c, "Unsupported RPC /%s.%s", service, method) return } rawReq := &rawMessage{remReq.Request} rawRes := &rawMessage{} err = internal.Call(c, service, method, rawReq, rawRes) remRes := &pb.Response{} if err == nil { remRes.Response = rawRes.buf } else if ae, ok := err.(*internal.APIError); ok { remRes.ApplicationError = &pb.ApplicationError{ Code: &ae.Code, Detail: &ae.Detail, } } else { // This shouldn't normally happen. log.Errorf(c, "appengine/remote_api: Unexpected error of type %T: %v", err, err) remRes.ApplicationError = &pb.ApplicationError{ Code: proto.Int32(0), Detail: proto.String(err.Error()), } } out, err := proto.Marshal(remRes) if err != nil { // This should not be possible. 
w.WriteHeader(500) log.Errorf(c, "proto.Marshal: %v", err) return } log.Infof(c, "Spooling %d bytes of response to /%s.%s", len(out), service, method) w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Length", strconv.Itoa(len(out))) w.Write(out) } // rawMessage is a protocol buffer type that is already serialised. // This allows the remote_api code here to handle messages // without having to know the real type. type rawMessage struct { buf []byte } func (rm *rawMessage) Marshal() ([]byte, error) { return rm.buf, nil } func (rm *rawMessage) Unmarshal(buf []byte) error { rm.buf = make([]byte, len(buf)) copy(rm.buf, buf) return nil } func requestSupported(service, method string) bool { // This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py switch service { case "app_identity_service", "blobstore", "capability_service", "channel", "datastore_v3", "datastore_v4", "file", "images", "logservice", "mail", "matcher", "memcache", "remote_datastore", "remote_socket", "search", "modules", "system", "taskqueue", "urlfetch", "user", "xmpp": return true } return false } // Methods to satisfy proto.Message. func (rm *rawMessage) Reset() { rm.buf = nil } func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) } func (*rawMessage) ProtoMessage() {} ================================================ FILE: vendor/google.golang.org/appengine/runtime/runtime.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package runtime exposes information about the resource usage of the application. It also provides a way to run code in a new background context of a module. This package does not work on Managed VMs. */ package runtime import ( "net/http" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/system" ) // Statistics represents the system's statistics. type Statistics struct { // CPU records the CPU consumed by this instance, in megacycles. CPU struct { Total float64 Rate1M float64 // consumption rate over one minute Rate10M float64 // consumption rate over ten minutes } // RAM records the memory used by the instance, in megabytes. RAM struct { Current float64 Average1M float64 // average usage over one minute Average10M float64 // average usage over ten minutes } } func Stats(c context.Context) (*Statistics, error) { req := &pb.GetSystemStatsRequest{} res := &pb.GetSystemStatsResponse{} if err := internal.Call(c, "system", "GetSystemStats", req, res); err != nil { return nil, err } s := &Statistics{} if res.Cpu != nil { s.CPU.Total = res.Cpu.GetTotal() s.CPU.Rate1M = res.Cpu.GetRate1M() s.CPU.Rate10M = res.Cpu.GetRate10M() } if res.Memory != nil { s.RAM.Current = res.Memory.GetCurrent() s.RAM.Average1M = res.Memory.GetAverage1M() s.RAM.Average10M = res.Memory.GetAverage10M() } return s, nil } /* RunInBackground makes an API call that triggers an /_ah/background request. There are two independent code paths that need to make contact: the RunInBackground code, and the /_ah/background handler. The matchmaker loop arranges for the two paths to meet. The RunInBackground code passes a send to the matchmaker, the /_ah/background passes a recv to the matchmaker, and the matchmaker hooks them up. 
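From the caller's side none of this machinery is visible: a request handler
running in a manually scaled module simply passes a function to
RunInBackground. A minimal sketch (the handler name is illustrative, and the
net/http, appengine and golang.org/x/net/context imports are assumed):

	func kickOff(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)
		err := runtime.RunInBackground(ctx, func(c context.Context) {
			// Long-running work; c may outlast the request's own context.
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}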
*/ func init() { http.HandleFunc("/_ah/background", handleBackground) sc := make(chan send) rc := make(chan recv) sendc, recvc = sc, rc go matchmaker(sc, rc) } var ( sendc chan<- send // RunInBackground sends to this recvc chan<- recv // handleBackground sends to this ) type send struct { id string f func(context.Context) } type recv struct { id string ch chan<- func(context.Context) } func matchmaker(sendc <-chan send, recvc <-chan recv) { // When one side of the match arrives before the other // it is inserted in the corresponding map. waitSend := make(map[string]send) waitRecv := make(map[string]recv) for { select { case s := <-sendc: if r, ok := waitRecv[s.id]; ok { // meet! delete(waitRecv, s.id) r.ch <- s.f } else { // waiting for r waitSend[s.id] = s } case r := <-recvc: if s, ok := waitSend[r.id]; ok { // meet! delete(waitSend, r.id) r.ch <- s.f } else { // waiting for s waitRecv[r.id] = r } } } } var newContext = appengine.NewContext // for testing func handleBackground(w http.ResponseWriter, req *http.Request) { id := req.Header.Get("X-AppEngine-BackgroundRequest") ch := make(chan func(context.Context)) recvc <- recv{id, ch} (<-ch)(newContext(req)) } // RunInBackground runs f in a background goroutine in this process. // f is provided a context that may outlast the context provided to RunInBackground. // This is only valid to invoke from a manually scaled module. func RunInBackground(c context.Context, f func(c context.Context)) error { req := &pb.StartBackgroundRequestRequest{} res := &pb.StartBackgroundRequestResponse{} if err := internal.Call(c, "system", "StartBackgroundRequest", req, res); err != nil { return err } sendc <- send{res.GetRequestId(), f} return nil } func init() { internal.RegisterErrorCodeMap("system", pb.SystemServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/search/doc.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package search provides a client for App Engine's search service. Basic Operations Indexes contain documents. Each index is identified by its name: a human-readable ASCII string. Within an index, documents are associated with an ID, which is also a human-readable ASCII string. A document's contents are a mapping from case-sensitive field names to values. Valid types for field values are: - string, - search.Atom, - search.HTML, - time.Time (stored with millisecond precision), - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive), - appengine.GeoPoint. The Get and Put methods on an Index load and save a document. A document's contents are typically represented by a struct pointer. Example code: type Doc struct { Author string Comment string Creation time.Time } index, err := search.Open("comments") if err != nil { return err } newID, err := index.Put(ctx, "", &Doc{ Author: "gopher", Comment: "the truth of the matter", Creation: time.Now(), }) if err != nil { return err } A single document can be retrieved by its ID. Pass a destination struct to Get to hold the resulting document. var doc Doc err := index.Get(ctx, id, &doc) if err != nil { return err } Search and Listing Documents Indexes have two methods for retrieving multiple documents at once: Search and List. Searching an index for a query will result in an iterator. 
As with an iterator from package datastore, pass a destination struct to Next to decode the next result. Next will return Done when the iterator is exhausted. for t := index.Search(ctx, "Comment:truth", nil); ; { var doc Doc id, err := t.Next(&doc) if err == search.Done { break } if err != nil { return err } fmt.Fprintf(w, "%s -> %#v\n", id, doc) } Search takes a string query to determine which documents to return. The query can be simple, such as a single word to match, or complex. The query language is described at https://cloud.google.com/appengine/docs/go/search/query_strings Search also takes an optional SearchOptions struct which gives much more control over how results are calculated and returned. Call List to iterate over all documents in an index. for t := index.List(ctx, nil); ; { var doc Doc id, err := t.Next(&doc) if err == search.Done { break } if err != nil { return err } fmt.Fprintf(w, "%s -> %#v\n", id, doc) } Fields and Facets A document's contents can be represented by a variety of types. These are typically struct pointers, but they can also be represented by any type implementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata to be set for the document with the DocumentMetadata type. Struct pointers are more strongly typed and are easier to use; FieldLoadSavers are more flexible. A document's contents can be expressed in two ways: fields and facets. Fields are the most common way of providing content for documents. Fields can store data in multiple types and can be matched in searches using query strings. Facets provide a way to attach categorical information to a document. The only valid types for facets are search.Atom and float64. Facets allow search results to contain summaries of the categories matched in a search, and to restrict searches to only match against specific categories. By default, for struct pointers, all of the struct fields are used as document fields, and the field name used is the same as on the struct (and hence must start with an upper case letter). Struct fields may have a `search:"name,options"` tag. The name must start with a letter and be composed only of word characters. If options is "facet" then the struct field will be used as a document facet. If options is "" then the comma may be omitted. There are no other recognized options. Example code: // A and B are renamed to a and b. // A, C and I are facets. // D's tag is equivalent to having no tag at all (E). // I has tag information for both the search and json packages. type TaggedStruct struct { A float64 `search:"a,facet"` B float64 `search:"b"` C float64 `search:",facet"` D float64 `search:""` E float64 I float64 `search:",facet" json:"i"` } The FieldLoadSaver Interface A document's contents can also be represented by any type that implements the FieldLoadSaver interface. This type may be a struct pointer, but it does not have to be. The search package will call Load when loading the document's contents, and Save when saving them. In addition to a slice of Fields, the Load and Save methods also use the DocumentMetadata type to provide additional information about a document (such as its Rank, or set of Facets). Possible uses for this interface include deriving non-stored fields, verifying fields or setting specific languages for string and HTML fields. Example code: type CustomFieldsExample struct { // Item's title and which language it is in. Title string Lang string // Mass, in grams. 
Mass int } func (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error { // Load the title field, failing if any other field is found. for _, f := range fields { if f.Name != "title" { return fmt.Errorf("unknown field %q", f.Name) } s, ok := f.Value.(string) if !ok { return fmt.Errorf("unsupported type %T for field %q", f.Value, f.Name) } x.Title = s x.Lang = f.Language } // Load the mass facet, failing if any other facet is found. for _, f := range meta.Facets { if f.Name != "mass" { return fmt.Errorf("unknown facet %q", f.Name) } m, ok := f.Value.(float64) if !ok { return fmt.Errorf("unsupported type %T for facet %q", f.Value, f.Name) } x.Mass = int(m) } return nil } func (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) { fields := []search.Field{ {Name: "title", Value: x.Title, Language: x.Lang}, } meta := &search.DocumentMetadata{ Facets: { {Name: "mass", Value: float64(x.Mass)}, }, } return fields, meta, nil } */ package search ================================================ FILE: vendor/google.golang.org/appengine/search/field.go ================================================ // Copyright 2014 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package search // Field is a name/value pair. A search index's document can be loaded and // saved as a sequence of Fields. type Field struct { // Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/. Name string // Value is the field value. The valid types are: // - string, // - search.Atom, // - search.HTML, // - time.Time (stored with millisecond precision), // - float64, // - GeoPoint. Value interface{} // Language is a two-letter ISO 639-1 code for the field's language, // defaulting to "en" if nothing is specified. It may only be specified for // fields of type string and search.HTML. Language string // Derived marks fields that were calculated as a result of a // FieldExpression provided to Search. This field is ignored when saving a // document. Derived bool } // Facet is a name/value pair which is used to add categorical information to a // document. type Facet struct { // Name is the facet name. A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/. // A facet name cannot be longer than 500 characters. Name string // Value is the facet value. // // When being used in documents (for example, in // DocumentMetadata.Facets), the valid types are: // - search.Atom, // - float64. // // When being used in SearchOptions.Refinements or being returned // in FacetResult, the valid types are: // - search.Atom, // - search.Range. Value interface{} } // DocumentMetadata is a struct containing information describing a given document. type DocumentMetadata struct { // Rank is an integer specifying the order the document will be returned in // search results. If zero, the rank will be set to the number of seconds since // 2011-01-01 00:00:00 UTC when being Put into an index. Rank int // Facets is the set of facets for this document. Facets []Facet } // FieldLoadSaver can be converted from and to a slice of Fields // with additional document metadata. type FieldLoadSaver interface { Load([]Field, *DocumentMetadata) error Save() ([]Field, *DocumentMetadata, error) } // FieldList converts a []Field to implement FieldLoadSaver. type FieldList []Field // Load loads all of the provided fields into l. // It does not first reset *l to an empty slice. 
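// A FieldList is useful when the document schema is not known in advance; a
// minimal sketch of dumping an arbitrary document (the index name, document
// ID and handler are illustrative):
//
//	import (
//		"net/http"
//
//		"google.golang.org/appengine"
//		"google.golang.org/appengine/log"
//		"google.golang.org/appengine/search"
//	)
//
//	func dumpDoc(w http.ResponseWriter, r *http.Request) {
//		ctx := appengine.NewContext(r)
//		index, err := search.Open("comments")
//		if err != nil {
//			log.Errorf(ctx, "Open: %v", err)
//			return
//		}
//		var fields search.FieldList
//		if err := index.Get(ctx, "some-doc-id", &fields); err != nil {
//			log.Errorf(ctx, "Get: %v", err)
//			return
//		}
//		for _, f := range fields {
//			log.Infof(ctx, "%s = %v", f.Name, f.Value)
//		}
//	}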
func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error { *l = append(*l, f...) return nil } // Save returns all of l's fields as a slice of Fields. func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) { return *l, nil, nil } var _ FieldLoadSaver = (*FieldList)(nil) ================================================ FILE: vendor/google.golang.org/appengine/search/search.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package search // TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage?? // TODO: Index.GetAll (or Iterator.GetAll)? // TODO: struct <-> protobuf tests. // TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero // time.Time)? _MAXIMUM_STRING_LENGTH? import ( "errors" "fmt" "math" "reflect" "regexp" "strconv" "strings" "time" "unicode/utf8" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/search" ) var ( // ErrInvalidDocumentType is returned when methods like Put, Get or Next // are passed a dst or src argument of invalid type. ErrInvalidDocumentType = errors.New("search: invalid document type") // ErrNoSuchDocument is returned when no document was found for a given ID. ErrNoSuchDocument = errors.New("search: no such document") ) // Atom is a document field whose contents are indexed as a single indivisible // string. type Atom string // HTML is a document field whose contents are indexed as HTML. Only text nodes // are indexed: "foobar" will be treated as "foobar". type HTML string // validIndexNameOrDocID is the Go equivalent of Python's // _ValidateVisiblePrintableAsciiNotReserved. func validIndexNameOrDocID(s string) bool { if strings.HasPrefix(s, "!") { return false } for _, c := range s { if c < 0x21 || 0x7f <= c { return false } } return true } var ( fieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`) languageRE = regexp.MustCompile(`^[a-z]{2}$`) ) // validFieldName is the Go equivalent of Python's _CheckFieldName. It checks // the validity of both field and facet names. func validFieldName(s string) bool { return len(s) <= 500 && fieldNameRE.MatchString(s) } // validDocRank checks that the ranks is in the range [0, 2^31). func validDocRank(r int) bool { return 0 <= r && r <= (1<<31-1) } // validLanguage checks that a language looks like ISO 639-1. func validLanguage(s string) bool { return languageRE.MatchString(s) } // validFloat checks that f is in the range [-2147483647, 2147483647]. func validFloat(f float64) bool { return -(1<<31-1) <= f && f <= (1<<31-1) } // Index is an index of documents. type Index struct { spec pb.IndexSpec } // orderIDEpoch forms the basis for populating OrderId on documents. var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC) // Open opens the index with the given name. The index is created if it does // not already exist. // // The name is a human-readable ASCII string. It must contain no whitespace // characters and not start with "!". func Open(name string) (*Index, error) { if !validIndexNameOrDocID(name) { return nil, fmt.Errorf("search: invalid index name %q", name) } return &Index{ spec: pb.IndexSpec{ Name: &name, }, }, nil } // Put saves src to the index. If id is empty, a new ID is allocated by the // service and returned. 
If id is not empty, any existing index entry for that // ID is replaced. // // The ID is a human-readable ASCII string. It must contain no whitespace // characters and not start with "!". // // src must be a non-nil struct pointer or implement the FieldLoadSaver // interface. func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) { d, err := saveDoc(src) if err != nil { return "", err } if id != "" { if !validIndexNameOrDocID(id) { return "", fmt.Errorf("search: invalid ID %q", id) } d.Id = proto.String(id) } req := &pb.IndexDocumentRequest{ Params: &pb.IndexDocumentParams{ Document: []*pb.Document{d}, IndexSpec: &x.spec, }, } res := &pb.IndexDocumentResponse{} if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil { return "", err } if len(res.Status) > 0 { if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK { return "", fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail()) } } if len(res.Status) != 1 || len(res.DocId) != 1 { return "", fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs)", len(res.Status), len(res.DocId)) } return res.DocId[0], nil } // Get loads the document with the given ID into dst. // // The ID is a human-readable ASCII string. It must be non-empty, contain no // whitespace characters and not start with "!". // // dst must be a non-nil struct pointer or implement the FieldLoadSaver // interface. // // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. ErrFieldMismatch is only returned if // dst is a struct pointer. It is up to the callee to decide whether this error // is fatal, recoverable, or ignorable. func (x *Index) Get(c context.Context, id string, dst interface{}) error { if id == "" || !validIndexNameOrDocID(id) { return fmt.Errorf("search: invalid ID %q", id) } req := &pb.ListDocumentsRequest{ Params: &pb.ListDocumentsParams{ IndexSpec: &x.spec, StartDocId: proto.String(id), Limit: proto.Int32(1), }, } res := &pb.ListDocumentsResponse{} if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil { return err } if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) } if len(res.Document) != 1 || res.Document[0].GetId() != id { return ErrNoSuchDocument } return loadDoc(dst, res.Document[0], nil) } // Delete deletes a document from the index. func (x *Index) Delete(c context.Context, id string) error { req := &pb.DeleteDocumentRequest{ Params: &pb.DeleteDocumentParams{ DocId: []string{id}, IndexSpec: &x.spec, }, } res := &pb.DeleteDocumentResponse{} if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil { return err } if len(res.Status) != 1 { return fmt.Errorf("search: internal error: wrong number of results (%d)", len(res.Status)) } if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK { return fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail()) } return nil } // List lists all of the documents in an index. The documents are returned in // increasing ID order. 
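// A sketch of an IDs-only listing with a limit; ListOptions is defined below
// and the index name is illustrative:
//
//	import (
//		"fmt"
//		"net/http"
//
//		"google.golang.org/appengine"
//		"google.golang.org/appengine/search"
//	)
//
//	func listIDs(w http.ResponseWriter, r *http.Request) {
//		ctx := appengine.NewContext(r)
//		index, err := search.Open("comments")
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		it := index.List(ctx, &search.ListOptions{IDsOnly: true, Limit: 20})
//		for {
//			id, err := it.Next(nil) // a nil dst is fine when only IDs are requested
//			if err == search.Done {
//				break
//			}
//			if err != nil {
//				http.Error(w, err.Error(), http.StatusInternalServerError)
//				return
//			}
//			fmt.Fprintln(w, id)
//		}
//	}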
func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { t := &Iterator{ c: c, index: x, count: -1, listInclusive: true, more: moreList, } if opts != nil { t.listStartID = opts.StartID t.limit = opts.Limit t.idsOnly = opts.IDsOnly } return t } func moreList(t *Iterator) error { req := &pb.ListDocumentsRequest{ Params: &pb.ListDocumentsParams{ IndexSpec: &t.index.spec, }, } if t.listStartID != "" { req.Params.StartDocId = &t.listStartID req.Params.IncludeStartDoc = &t.listInclusive } if t.limit > 0 { req.Params.Limit = proto.Int32(int32(t.limit)) } if t.idsOnly { req.Params.KeysOnly = &t.idsOnly } res := &pb.ListDocumentsResponse{} if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil { return err } if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) } t.listRes = res.Document t.listStartID, t.listInclusive, t.more = "", false, nil if len(res.Document) != 0 && t.limit <= 0 { if id := res.Document[len(res.Document)-1].GetId(); id != "" { t.listStartID, t.more = id, moreList } } return nil } // ListOptions are the options for listing documents in an index. Passing a nil // *ListOptions is equivalent to using the default values. type ListOptions struct { // StartID is the inclusive lower bound for the ID of the returned // documents. The zero value means all documents will be returned. StartID string // Limit is the maximum number of documents to return. The zero value // indicates no limit. Limit int // IDsOnly indicates that only document IDs should be returned for the list // operation; no document fields are populated. IDsOnly bool } // Search searches the index for the given query. func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator { t := &Iterator{ c: c, index: x, searchQuery: query, more: moreSearch, } if opts != nil { if opts.Cursor != "" { if opts.Offset != 0 { return errIter("at most one of Cursor and Offset may be specified") } t.searchCursor = proto.String(string(opts.Cursor)) } t.limit = opts.Limit t.fields = opts.Fields t.idsOnly = opts.IDsOnly t.sort = opts.Sort t.exprs = opts.Expressions t.refinements = opts.Refinements t.facetOpts = opts.Facets t.searchOffset = opts.Offset } return t } func moreSearch(t *Iterator) error { // We use per-result (rather than single/per-page) cursors since this // lets us return a Cursor for every iterator document. The two cursor // types are largely interchangeable: a page cursor is the same as the // last per-result cursor in a given search response. 
req := &pb.SearchRequest{ Params: &pb.SearchParams{ IndexSpec: &t.index.spec, Query: &t.searchQuery, Cursor: t.searchCursor, CursorType: pb.SearchParams_PER_RESULT.Enum(), FieldSpec: &pb.FieldSpec{ Name: t.fields, }, }, } if t.limit > 0 { req.Params.Limit = proto.Int32(int32(t.limit)) } if t.searchOffset > 0 { req.Params.Offset = proto.Int32(int32(t.searchOffset)) t.searchOffset = 0 } if t.idsOnly { req.Params.KeysOnly = &t.idsOnly } if t.sort != nil { if err := sortToProto(t.sort, req.Params); err != nil { return err } } if t.refinements != nil { if err := refinementsToProto(t.refinements, req.Params); err != nil { return err } } for _, e := range t.exprs { req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{ Name: proto.String(e.Name), Expression: proto.String(e.Expr), }) } for _, f := range t.facetOpts { if err := f.setParams(req.Params); err != nil { return fmt.Errorf("bad FacetSearchOption: %v", err) } } // Don't repeat facet search. t.facetOpts = nil res := &pb.SearchResponse{} if err := internal.Call(t.c, "search", "Search", req, res); err != nil { return err } if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) } t.searchRes = res.Result if len(res.FacetResult) > 0 { t.facetRes = res.FacetResult } t.count = int(*res.MatchedCount) if t.limit > 0 { t.more = nil } else { t.more = moreSearch } return nil } // SearchOptions are the options for searching an index. Passing a nil // *SearchOptions is equivalent to using the default values. type SearchOptions struct { // Limit is the maximum number of documents to return. The zero value // indicates no limit. Limit int // IDsOnly indicates that only document IDs should be returned for the search // operation; no document fields are populated. IDsOnly bool // Sort controls the ordering of search results. Sort *SortOptions // Fields specifies which document fields to include in the results. If omitted, // all document fields are returned. No more than 100 fields may be specified. Fields []string // Expressions specifies additional computed fields to add to each returned // document. Expressions []FieldExpression // Facets controls what facet information is returned for these search results. // If no options are specified, no facet results will be returned. Facets []FacetSearchOption // Refinements filters the returned documents by requiring them to contain facets // with specific values. Refinements are applied in conjunction for facets with // different names, and in disjunction otherwise. Refinements []Facet // Cursor causes the results to commence with the first document after // the document associated with the cursor. Cursor Cursor // Offset specifies the number of documents to skip over before returning results. // When specified, Cursor must be nil. Offset int } // Cursor represents an iterator's position. // // The string value of a cursor is web-safe. It can be saved and restored // for later use. type Cursor string // FieldExpression defines a custom expression to evaluate for each result. type FieldExpression struct { // Name is the name to use for the computed field. Name string // Expr is evaluated to provide a custom content snippet for each document. // See https://cloud.google.com/appengine/docs/go/search/options for // the supported expression syntax. Expr string } // FacetSearchOption controls what facet information is returned in search results. 
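// A sketch of a search that returns only selected stored fields plus one
// computed expression; the index, the field names and the snippet expression
// are illustrative (see the URL in FieldExpression for the expression syntax):
//
//	import (
//		"fmt"
//		"net/http"
//
//		"google.golang.org/appengine"
//		"google.golang.org/appengine/search"
//	)
//
//	func findComments(w http.ResponseWriter, r *http.Request) {
//		ctx := appengine.NewContext(r)
//		index, err := search.Open("comments")
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		opts := &search.SearchOptions{
//			Limit:  10,
//			Fields: []string{"Author"},
//			Expressions: []search.FieldExpression{
//				{Name: "Snip", Expr: `snippet("truth", Comment)`},
//			},
//		}
//		for it := index.Search(ctx, "Comment:truth", opts); ; {
//			var doc search.FieldList // computed fields come back marked Derived
//			id, err := it.Next(&doc)
//			if err == search.Done {
//				break
//			}
//			if err != nil {
//				http.Error(w, err.Error(), http.StatusInternalServerError)
//				return
//			}
//			fmt.Fprintf(w, "%s: %v\n", id, doc)
//		}
//	}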
type FacetSearchOption interface { setParams(*pb.SearchParams) error } // AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet // discovery for the search. Automatic facet discovery looks for the facets // which appear the most often in the aggregate in the matched documents. // // The maximum number of facets returned is controlled by facetLimit, and the // maximum number of values per facet by facetLimit. A limit of zero indicates // a default limit should be used. func AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption { return &autoFacetOpt{facetLimit, valueLimit} } type autoFacetOpt struct { facetLimit, valueLimit int } const defaultAutoFacetLimit = 10 // As per python runtime search.py. func (o *autoFacetOpt) setParams(params *pb.SearchParams) error { lim := int32(o.facetLimit) if lim == 0 { lim = defaultAutoFacetLimit } params.AutoDiscoverFacetCount = &lim if o.valueLimit > 0 { params.FacetAutoDetectParam = &pb.FacetAutoDetectParam{ ValueLimit: proto.Int32(int32(o.valueLimit)), } } return nil } // FacetDiscovery returns a FacetSearchOption which selects a facet to be // returned with the search results. By default, the most frequently // occurring values for that facet will be returned. However, you can also // specify a list of particular Atoms or specific Ranges to return. func FacetDiscovery(name string, value ...interface{}) FacetSearchOption { return &facetOpt{name, value} } type facetOpt struct { name string values []interface{} } func (o *facetOpt) setParams(params *pb.SearchParams) error { req := &pb.FacetRequest{Name: &o.name} params.IncludeFacet = append(params.IncludeFacet, req) if len(o.values) == 0 { return nil } vtype := reflect.TypeOf(o.values[0]) reqParam := &pb.FacetRequestParam{} for _, v := range o.values { if reflect.TypeOf(v) != vtype { return errors.New("values must all be Atom, or must all be Range") } switch v := v.(type) { case Atom: reqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v)) case Range: rng, err := rangeToProto(v) if err != nil { return fmt.Errorf("invalid range: %v", err) } reqParam.Range = append(reqParam.Range, rng) default: return fmt.Errorf("unsupported value type %T", v) } } req.Params = reqParam return nil } // FacetDocumentDepth returns a FacetSearchOption which controls the number of // documents to be evaluated with preparing facet results. func FacetDocumentDepth(depth int) FacetSearchOption { return facetDepthOpt(depth) } type facetDepthOpt int func (o facetDepthOpt) setParams(params *pb.SearchParams) error { params.FacetDepth = proto.Int32(int32(o)) return nil } // FacetResult represents the number of times a particular facet and value // appeared in the documents matching a search request. type FacetResult struct { Facet // Count is the number of times this specific facet and value appeared in the // matching documents. Count int } // Range represents a numeric range with inclusive start and exclusive end. // Start may be specified as math.Inf(-1) to indicate there is no minimum // value, and End may similarly be specified as math.Inf(1); at least one of // Start or End must be a finite number. type Range struct { Start, End float64 } var ( negInf = math.Inf(-1) posInf = math.Inf(1) ) // AtLeast returns a Range matching any value greater than, or equal to, min. func AtLeast(min float64) Range { return Range{Start: min, End: posInf} } // LessThan returns a Range matching any value less than max. 
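// A sketch of asking for facet information and reading it back from the
// iterator; the index name, the "price" facet and the query are illustrative:
//
//	import (
//		"fmt"
//		"net/http"
//
//		"google.golang.org/appengine"
//		"google.golang.org/appengine/search"
//	)
//
//	func facetedSearch(w http.ResponseWriter, r *http.Request) {
//		ctx := appengine.NewContext(r)
//		index, err := search.Open("products")
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		opts := &search.SearchOptions{
//			IDsOnly: true,
//			Facets: []search.FacetSearchOption{
//				search.AutoFacetDiscovery(0, 0), // zero means the default limits
//				search.FacetDiscovery("price", search.LessThan(100), search.AtLeast(100)),
//			},
//		}
//		it := index.Search(ctx, "camera", opts)
//		facets, err := it.Facets()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		for _, group := range facets {
//			for _, f := range group {
//				fmt.Fprintf(w, "%s %v: %d\n", f.Name, f.Value, f.Count)
//			}
//		}
//	}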
func LessThan(max float64) Range { return Range{Start: negInf, End: max} } // SortOptions control the ordering and scoring of search results. type SortOptions struct { // Expressions is a slice of expressions representing a multi-dimensional // sort. Expressions []SortExpression // Scorer, when specified, will cause the documents to be scored according to // search term frequency. Scorer Scorer // Limit is the maximum number of objects to score and/or sort. Limit cannot // be more than 10,000. The zero value indicates a default limit. Limit int } // SortExpression defines a single dimension for sorting a document. type SortExpression struct { // Expr is evaluated to provide a sorting value for each document. // See https://cloud.google.com/appengine/docs/go/search/options for // the supported expression syntax. Expr string // Reverse causes the documents to be sorted in ascending order. Reverse bool // The default value to use when no field is present or the expresion // cannot be calculated for a document. For text sorts, Default must // be of type string; for numeric sorts, float64. Default interface{} } // A Scorer defines how a document is scored. type Scorer interface { toProto(*pb.ScorerSpec) } type enumScorer struct { enum pb.ScorerSpec_Scorer } func (e enumScorer) toProto(spec *pb.ScorerSpec) { spec.Scorer = e.enum.Enum() } var ( // MatchScorer assigns a score based on term frequency in a document. MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER} // RescoringMatchScorer assigns a score based on the quality of the query // match. It is similar to a MatchScorer but uses a more complex scoring // algorithm based on match term frequency and other factors like field type. // Please be aware that this algorithm is continually refined and can change // over time without notice. This means that the ordering of search results // that use this scorer can also change without notice. RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER} ) func sortToProto(sort *SortOptions, params *pb.SearchParams) error { for _, e := range sort.Expressions { spec := &pb.SortSpec{ SortExpression: proto.String(e.Expr), } if e.Reverse { spec.SortDescending = proto.Bool(false) } if e.Default != nil { switch d := e.Default.(type) { case float64: spec.DefaultValueNumeric = &d case string: spec.DefaultValueText = &d default: return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr) } } params.SortSpec = append(params.SortSpec, spec) } spec := &pb.ScorerSpec{} if sort.Limit > 0 { spec.Limit = proto.Int32(int32(sort.Limit)) params.ScorerSpec = spec } if sort.Scorer != nil { sort.Scorer.toProto(spec) params.ScorerSpec = spec } return nil } func refinementsToProto(refinements []Facet, params *pb.SearchParams) error { for _, r := range refinements { ref := &pb.FacetRefinement{ Name: proto.String(r.Name), } switch v := r.Value.(type) { case Atom: ref.Value = proto.String(string(v)) case Range: rng, err := rangeToProto(v) if err != nil { return fmt.Errorf("search: refinement for facet %q: %v", r.Name, err) } // Unfortunately there are two identical messages for identify Facet ranges. 
ref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End} default: return fmt.Errorf("search: unsupported refinement for facet %q of type %T", r.Name, v) } params.FacetRefinement = append(params.FacetRefinement, ref) } return nil } func rangeToProto(r Range) (*pb.FacetRange, error) { rng := &pb.FacetRange{} if r.Start != negInf { if !validFloat(r.Start) { return nil, errors.New("invalid value for Start") } rng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64)) } else if r.End == posInf { return nil, errors.New("either Start or End must be finite") } if r.End != posInf { if !validFloat(r.End) { return nil, errors.New("invalid value for End") } rng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64)) } return rng, nil } func protoToRange(rng *pb.FacetRefinement_Range) Range { r := Range{Start: negInf, End: posInf} if x, err := strconv.ParseFloat(rng.GetStart(), 64); err != nil { r.Start = x } if x, err := strconv.ParseFloat(rng.GetEnd(), 64); err != nil { r.End = x } return r } // Iterator is the result of searching an index for a query or listing an // index. type Iterator struct { c context.Context index *Index err error listRes []*pb.Document listStartID string listInclusive bool searchRes []*pb.SearchResult facetRes []*pb.FacetResult searchQuery string searchCursor *string searchOffset int sort *SortOptions fields []string exprs []FieldExpression refinements []Facet facetOpts []FacetSearchOption more func(*Iterator) error count int limit int // items left to return; 0 for unlimited. idsOnly bool } // errIter returns an iterator that only returns the given error. func errIter(err string) *Iterator { return &Iterator{ err: errors.New(err), } } // Done is returned when a query iteration has completed. var Done = errors.New("search: query has no more results") // Count returns an approximation of the number of documents matched by the // query. It is only valid to call for iterators returned by Search. func (t *Iterator) Count() int { return t.count } // fetchMore retrieves more results, if there are no errors or pending results. func (t *Iterator) fetchMore() { if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil { t.err = t.more(t) } } // Next returns the ID of the next result. When there are no more results, // Done is returned as the error. // // dst must be a non-nil struct pointer, implement the FieldLoadSaver // interface, or be a nil interface value. If a non-nil dst is provided, it // will be filled with the indexed fields. dst is ignored if this iterator was // created with an IDsOnly option. func (t *Iterator) Next(dst interface{}) (string, error) { t.fetchMore() if t.err != nil { return "", t.err } var doc *pb.Document var exprs []*pb.Field switch { case len(t.listRes) != 0: doc = t.listRes[0] t.listRes = t.listRes[1:] case len(t.searchRes) != 0: doc = t.searchRes[0].Document exprs = t.searchRes[0].Expression t.searchCursor = t.searchRes[0].Cursor t.searchRes = t.searchRes[1:] default: return "", Done } if doc == nil { return "", errors.New("search: internal error: no document returned") } if !t.idsOnly && dst != nil { if err := loadDoc(dst, doc, exprs); err != nil { return "", err } } return doc.GetId(), nil } // Cursor returns the cursor associated with the current document (that is, // the document most recently returned by a call to Next). // // Passing this cursor in a future call to Search will cause those results // to commence with the first document after the current document. 
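// A sketch of per-result cursors used for paging; the query, the index name
// and the way the cursor travels in the URL are illustrative:
//
//	import (
//		"fmt"
//		"net/http"
//
//		"google.golang.org/appengine"
//		"google.golang.org/appengine/search"
//	)
//
//	func page(w http.ResponseWriter, r *http.Request) {
//		ctx := appengine.NewContext(r)
//		index, err := search.Open("comments")
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		opts := &search.SearchOptions{
//			Limit:  10,
//			Cursor: search.Cursor(r.FormValue("cursor")), // "" starts at the beginning
//		}
//		it := index.Search(ctx, "Comment:truth", opts)
//		var last search.Cursor
//		for {
//			id, err := it.Next(nil)
//			if err == search.Done {
//				break
//			}
//			if err != nil {
//				http.Error(w, err.Error(), http.StatusInternalServerError)
//				return
//			}
//			last = it.Cursor() // cursor for the document just returned
//			fmt.Fprintln(w, id)
//		}
//		fmt.Fprintf(w, "next page: ?cursor=%s\n", last)
//	}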
func (t *Iterator) Cursor() Cursor { if t.searchCursor == nil { return "" } return Cursor(*t.searchCursor) } // Facets returns the facets found within the search results, if any facets // were requested in the SearchOptions. func (t *Iterator) Facets() ([][]FacetResult, error) { t.fetchMore() if t.err != nil && t.err != Done { return nil, t.err } var facets [][]FacetResult for _, f := range t.facetRes { fres := make([]FacetResult, 0, len(f.Value)) for _, v := range f.Value { ref := v.Refinement facet := FacetResult{ Facet: Facet{Name: ref.GetName()}, Count: int(v.GetCount()), } if ref.Value != nil { facet.Value = Atom(*ref.Value) } else { facet.Value = protoToRange(ref.Range) } fres = append(fres, facet) } facets = append(facets, fres) } return facets, nil } // saveDoc converts from a struct pointer or // FieldLoadSaver/FieldMetadataLoadSaver to the Document protobuf. func saveDoc(src interface{}) (*pb.Document, error) { var err error var fields []Field var meta *DocumentMetadata switch x := src.(type) { case FieldLoadSaver: fields, meta, err = x.Save() default: fields, err = SaveStruct(src) } if err != nil { return nil, err } fieldsProto, err := fieldsToProto(fields) if err != nil { return nil, err } d := &pb.Document{ Field: fieldsProto, OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())), } if meta != nil { if meta.Rank != 0 { if !validDocRank(meta.Rank) { return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank) } *d.OrderId = int32(meta.Rank) } if len(meta.Facets) > 0 { facets, err := facetsToProto(meta.Facets) if err != nil { return nil, err } d.Facet = facets } } return d, nil } func fieldsToProto(src []Field) ([]*pb.Field, error) { // Maps to catch duplicate time or numeric fields. timeFields, numericFields := make(map[string]bool), make(map[string]bool) dst := make([]*pb.Field, 0, len(src)) for _, f := range src { if !validFieldName(f.Name) { return nil, fmt.Errorf("search: invalid field name %q", f.Name) } fieldValue := &pb.FieldValue{} switch x := f.Value.(type) { case string: fieldValue.Type = pb.FieldValue_TEXT.Enum() fieldValue.StringValue = proto.String(x) case Atom: fieldValue.Type = pb.FieldValue_ATOM.Enum() fieldValue.StringValue = proto.String(string(x)) case HTML: fieldValue.Type = pb.FieldValue_HTML.Enum() fieldValue.StringValue = proto.String(string(x)) case time.Time: if timeFields[f.Name] { return nil, fmt.Errorf("search: duplicate time field %q", f.Name) } timeFields[f.Name] = true fieldValue.Type = pb.FieldValue_DATE.Enum() fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10)) case float64: if numericFields[f.Name] { return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name) } if !validFloat(x) { return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x) } numericFields[f.Name] = true fieldValue.Type = pb.FieldValue_NUMBER.Enum() fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64)) case appengine.GeoPoint: if !x.Valid() { return nil, fmt.Errorf( "search: GeoPoint field %q with invalid value %v", f.Name, x) } fieldValue.Type = pb.FieldValue_GEO.Enum() fieldValue.Geo = &pb.FieldValue_Geo{ Lat: proto.Float64(x.Lat), Lng: proto.Float64(x.Lng), } default: return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value)) } if f.Language != "" { switch f.Value.(type) { case string, HTML: if !validLanguage(f.Language) { return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language) } fieldValue.Language = 
proto.String(f.Language) default: return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value) } } if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) { return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p) } dst = append(dst, &pb.Field{ Name: proto.String(f.Name), Value: fieldValue, }) } return dst, nil } func facetsToProto(src []Facet) ([]*pb.Facet, error) { dst := make([]*pb.Facet, 0, len(src)) for _, f := range src { if !validFieldName(f.Name) { return nil, fmt.Errorf("search: invalid facet name %q", f.Name) } facetValue := &pb.FacetValue{} switch x := f.Value.(type) { case Atom: if !utf8.ValidString(string(x)) { return nil, fmt.Errorf("search: %q facet is invalid UTF-8: %q", f.Name, x) } facetValue.Type = pb.FacetValue_ATOM.Enum() facetValue.StringValue = proto.String(string(x)) case float64: if !validFloat(x) { return nil, fmt.Errorf("search: numeric facet %q with invalid value %f", f.Name, x) } facetValue.Type = pb.FacetValue_NUMBER.Enum() facetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64)) default: return nil, fmt.Errorf("search: unsupported facet type: %v", reflect.TypeOf(f.Value)) } dst = append(dst, &pb.Facet{ Name: proto.String(f.Name), Value: facetValue, }) } return dst, nil } // loadDoc converts from protobufs to a struct pointer or // FieldLoadSaver/FieldMetadataLoadSaver. The src param provides the document's // stored fields and facets, and any document metadata. An additional slice of // fields, exprs, may optionally be provided to contain any derived expressions // requested by the developer. func loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) { fields, err := protoToFields(src.Field) if err != nil { return err } facets, err := protoToFacets(src.Facet) if err != nil { return err } if len(exprs) > 0 { exprFields, err := protoToFields(exprs) if err != nil { return err } // Mark each field as derived. for i := range exprFields { exprFields[i].Derived = true } fields = append(fields, exprFields...) 
} meta := &DocumentMetadata{ Rank: int(src.GetOrderId()), Facets: facets, } switch x := dst.(type) { case FieldLoadSaver: return x.Load(fields, meta) default: return loadStructWithMeta(dst, fields, meta) } } func protoToFields(fields []*pb.Field) ([]Field, error) { dst := make([]Field, 0, len(fields)) for _, field := range fields { fieldValue := field.GetValue() f := Field{ Name: field.GetName(), } switch fieldValue.GetType() { case pb.FieldValue_TEXT: f.Value = fieldValue.GetStringValue() f.Language = fieldValue.GetLanguage() case pb.FieldValue_ATOM: f.Value = Atom(fieldValue.GetStringValue()) case pb.FieldValue_HTML: f.Value = HTML(fieldValue.GetStringValue()) f.Language = fieldValue.GetLanguage() case pb.FieldValue_DATE: sv := fieldValue.GetStringValue() millis, err := strconv.ParseInt(sv, 10, 64) if err != nil { return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err) } f.Value = time.Unix(0, millis*1e6) case pb.FieldValue_NUMBER: sv := fieldValue.GetStringValue() x, err := strconv.ParseFloat(sv, 64) if err != nil { return nil, err } f.Value = x case pb.FieldValue_GEO: geoValue := fieldValue.GetGeo() geoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()} if !geoPoint.Valid() { return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint) } f.Value = geoPoint default: return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType()) } dst = append(dst, f) } return dst, nil } func protoToFacets(facets []*pb.Facet) ([]Facet, error) { if len(facets) == 0 { return nil, nil } dst := make([]Facet, 0, len(facets)) for _, facet := range facets { facetValue := facet.GetValue() f := Facet{ Name: facet.GetName(), } switch facetValue.GetType() { case pb.FacetValue_ATOM: f.Value = Atom(facetValue.GetStringValue()) case pb.FacetValue_NUMBER: sv := facetValue.GetStringValue() x, err := strconv.ParseFloat(sv, 64) if err != nil { return nil, err } f.Value = x default: return nil, fmt.Errorf("search: internal error: unknown data type %s", facetValue.GetType()) } dst = append(dst, f) } return dst, nil } func namespaceMod(m proto.Message, namespace string) { set := func(s **string) { if *s == nil { *s = &namespace } } switch m := m.(type) { case *pb.IndexDocumentRequest: set(&m.Params.IndexSpec.Namespace) case *pb.ListDocumentsRequest: set(&m.Params.IndexSpec.Namespace) case *pb.DeleteDocumentRequest: set(&m.Params.IndexSpec.Namespace) case *pb.SearchRequest: set(&m.Params.IndexSpec.Namespace) } } func init() { internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name) internal.NamespaceMods["search"] = namespaceMod } ================================================ FILE: vendor/google.golang.org/appengine/search/struct.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package search import ( "fmt" "reflect" "strings" "sync" ) // ErrFieldMismatch is returned when a field is to be loaded into a different // than the one it was stored from, or when a field is missing or unexported in // the destination struct. 
type ErrFieldMismatch struct { FieldName string Reason string } func (e *ErrFieldMismatch) Error() string { return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason) } // ErrFacetMismatch is returned when a facet is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. StructType is the type of the struct // pointed to by the destination argument passed to Iterator.Next. type ErrFacetMismatch struct { StructType reflect.Type FacetName string Reason string } func (e *ErrFacetMismatch) Error() string { return fmt.Sprintf("search: cannot load facet %q into a %q: %s", e.FacetName, e.StructType, e.Reason) } // structCodec defines how to convert a given struct to/from a search document. type structCodec struct { // byIndex returns the struct tag for the i'th struct field. byIndex []structTag // fieldByName returns the index of the struct field for the given field name. fieldByName map[string]int // facetByName returns the index of the struct field for the given facet name, facetByName map[string]int } // structTag holds a structured version of each struct field's parsed tag. type structTag struct { name string facet bool } var ( codecsMu sync.RWMutex codecs = map[reflect.Type]*structCodec{} ) func loadCodec(t reflect.Type) (*structCodec, error) { codecsMu.RLock() codec, ok := codecs[t] codecsMu.RUnlock() if ok { return codec, nil } codecsMu.Lock() defer codecsMu.Unlock() if codec, ok := codecs[t]; ok { return codec, nil } codec = &structCodec{ fieldByName: make(map[string]int), facetByName: make(map[string]int), } for i, I := 0, t.NumField(); i < I; i++ { f := t.Field(i) name, opts := f.Tag.Get("search"), "" if i := strings.Index(name, ","); i != -1 { name, opts = name[:i], name[i+1:] } // TODO(davidday): Support name=="-" as per datastore. if name == "" { name = f.Name } else if !validFieldName(name) { return nil, fmt.Errorf("search: struct tag has invalid field name: %q", name) } facet := opts == "facet" codec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet}) if facet { codec.facetByName[name] = i } else { codec.fieldByName[name] = i } } codecs[t] = codec return codec, nil } // structFLS adapts a struct to be a FieldLoadSaver. type structFLS struct { v reflect.Value codec *structCodec } func (s structFLS) Load(fields []Field, meta *DocumentMetadata) error { var err error for _, field := range fields { i, ok := s.codec.fieldByName[field.Name] if !ok { // Note the error, but keep going. err = &ErrFieldMismatch{ FieldName: field.Name, Reason: "no such struct field", } continue } f := s.v.Field(i) if !f.CanSet() { // Note the error, but keep going. err = &ErrFieldMismatch{ FieldName: field.Name, Reason: "cannot set struct field", } continue } v := reflect.ValueOf(field.Value) if ft, vt := f.Type(), v.Type(); ft != vt { err = &ErrFieldMismatch{ FieldName: field.Name, Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt), } continue } f.Set(v) } if meta == nil { return nil } for _, facet := range meta.Facets { i, ok := s.codec.facetByName[facet.Name] if !ok { // Note the error, but keep going. if err == nil { err = &ErrFacetMismatch{ StructType: s.v.Type(), FacetName: facet.Name, Reason: "no matching field found", } } continue } f := s.v.Field(i) if !f.CanSet() { // Note the error, but keep going. 
if err == nil { err = &ErrFacetMismatch{ StructType: s.v.Type(), FacetName: facet.Name, Reason: "unable to set unexported field of struct", } } continue } v := reflect.ValueOf(facet.Value) if ft, vt := f.Type(), v.Type(); ft != vt { if err == nil { err = &ErrFacetMismatch{ StructType: s.v.Type(), FacetName: facet.Name, Reason: fmt.Sprintf("type mismatch: %v for %d data", ft, vt), } continue } } f.Set(v) } return err } func (s structFLS) Save() ([]Field, *DocumentMetadata, error) { fields := make([]Field, 0, len(s.codec.fieldByName)) var facets []Facet for i, tag := range s.codec.byIndex { f := s.v.Field(i) if !f.CanSet() { continue } if tag.facet { facets = append(facets, Facet{Name: tag.name, Value: f.Interface()}) } else { fields = append(fields, Field{Name: tag.name, Value: f.Interface()}) } } return fields, &DocumentMetadata{Facets: facets}, nil } // newStructFLS returns a FieldLoadSaver for the struct pointer p. func newStructFLS(p interface{}) (FieldLoadSaver, error) { v := reflect.ValueOf(p) if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct { return nil, ErrInvalidDocumentType } codec, err := loadCodec(v.Elem().Type()) if err != nil { return nil, err } return structFLS{v.Elem(), codec}, nil } func loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error { x, err := newStructFLS(dst) if err != nil { return err } return x.Load(f, meta) } func saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) { x, err := newStructFLS(src) if err != nil { return nil, nil, err } return x.Save() } // LoadStruct loads the fields from f to dst. dst must be a struct pointer. func LoadStruct(dst interface{}, f []Field) error { return loadStructWithMeta(dst, f, nil) } // SaveStruct returns the fields from src as a slice of Field. // src must be a struct pointer. func SaveStruct(src interface{}) ([]Field, error) { f, _, err := saveStructWithMeta(src) return f, err } ================================================ FILE: vendor/google.golang.org/appengine/socket/doc.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package socket provides outbound network sockets. // // This package is only required in the classic App Engine environment. // Applications running only in the Managed VM hosting environment should // use the standard library's net package. package socket ================================================ FILE: vendor/google.golang.org/appengine/socket/socket_classic.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build appengine package socket import ( "fmt" "io" "net" "strconv" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/socket" ) // Dial connects to the address addr on the network protocol. // The address format is host:port, where host may be a hostname or an IP address. // Known protocols are "tcp" and "udp". // The returned connection satisfies net.Conn, and is valid while ctx is valid; // if the connection is to be used after ctx becomes invalid, invoke SetContext // with the new context. 
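The `search` struct tags consumed by loadCodec above are easiest to see on a concrete type. A sketch, with hypothetical field and facet names:

```go
package main // illustrative; not part of the vendored package

import (
	"time"

	"google.golang.org/appengine/search"
)

// Product is a hypothetical document type. Per loadCodec above, a missing tag
// name falls back to the Go field name, and the ",facet" option makes
// structFLS.Save emit the value as a Facet rather than a Field.
type Product struct {
	Name    string      `search:"name"`
	Desc    search.HTML // no tag: indexed under "Desc"
	Updated time.Time   `search:"updated"`
	Price   float64     `search:"price,facet"` // facet values must be float64 or Atom (see facetsToProto)
	Brand   search.Atom `search:"brand,facet"`
}
```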
func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { return DialTimeout(ctx, protocol, addr, 0) } var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ pb.CreateSocketRequest_IPv4, pb.CreateSocketRequest_IPv6, } // DialTimeout is like Dial but takes a timeout. // The timeout includes name resolution, if required. func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. if timeout > 0 { var cancel context.CancelFunc dialCtx, cancel = context.WithTimeout(ctx, timeout) defer cancel() } host, portStr, err := net.SplitHostPort(addr) if err != nil { return nil, err } port, err := strconv.Atoi(portStr) if err != nil { return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) } var prot pb.CreateSocketRequest_SocketProtocol switch protocol { case "tcp": prot = pb.CreateSocketRequest_TCP case "udp": prot = pb.CreateSocketRequest_UDP default: return nil, fmt.Errorf("socket: unknown protocol %q", protocol) } packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) if err != nil { return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) } if len(packedAddrs) == 0 { return nil, fmt.Errorf("no addresses for %q", host) } packedAddr := packedAddrs[0] // use first address fam := pb.CreateSocketRequest_IPv4 if len(packedAddr) == net.IPv6len { fam = pb.CreateSocketRequest_IPv6 } req := &pb.CreateSocketRequest{ Family: fam.Enum(), Protocol: prot.Enum(), RemoteIp: &pb.AddressPort{ Port: proto.Int32(int32(port)), PackedAddress: packedAddr, }, } if resolved { req.RemoteIp.HostnameHint = &host } res := &pb.CreateSocketReply{} if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { return nil, err } return &Conn{ ctx: ctx, desc: res.GetSocketDescriptor(), prot: prot, local: res.ProxyExternalIp, remote: req.RemoteIp, }, nil } // LookupIP returns the given host's IP addresses. func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { packedAddrs, _, err := resolve(ctx, ipFamilies, host) if err != nil { return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) } addrs = make([]net.IP, len(packedAddrs)) for i, pa := range packedAddrs { addrs[i] = net.IP(pa) } return addrs, nil } func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { // Check if it's an IP address. if ip := net.ParseIP(host); ip != nil { if ip := ip.To4(); ip != nil { return [][]byte{ip}, false, nil } return [][]byte{ip}, false, nil } req := &pb.ResolveRequest{ Name: &host, AddressFamilies: fams, } res := &pb.ResolveReply{} if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { // XXX: need to map to pb.ResolveReply_ErrorCode? return nil, false, err } return res.PackedAddress, true, nil } // withDeadline is like context.WithDeadline, except it ignores the zero deadline. func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { if deadline.IsZero() { return parent, func() {} } return context.WithDeadline(parent, deadline) } // Conn represents a socket connection. // It implements net.Conn. type Conn struct { ctx context.Context desc string offset int64 prot pb.CreateSocketRequest_SocketProtocol local, remote *pb.AddressPort readDeadline, writeDeadline time.Time // optional } // SetContext sets the context that is used by this Conn. 
// It is usually used only when using a Conn that was created in a different context, // such as when a connection is created during a warmup request but used while // servicing a user request. func (cn *Conn) SetContext(ctx context.Context) { cn.ctx = ctx } func (cn *Conn) Read(b []byte) (n int, err error) { const maxRead = 1 << 20 if len(b) > maxRead { b = b[:maxRead] } req := &pb.ReceiveRequest{ SocketDescriptor: &cn.desc, DataSize: proto.Int32(int32(len(b))), } res := &pb.ReceiveReply{} if !cn.readDeadline.IsZero() { req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) } ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) defer cancel() if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { return 0, err } if len(res.Data) == 0 { return 0, io.EOF } if len(res.Data) > len(b) { return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) } return copy(b, res.Data), nil } func (cn *Conn) Write(b []byte) (n int, err error) { const lim = 1 << 20 // max per chunk for n < len(b) { chunk := b[n:] if len(chunk) > lim { chunk = chunk[:lim] } req := &pb.SendRequest{ SocketDescriptor: &cn.desc, Data: chunk, StreamOffset: &cn.offset, } res := &pb.SendReply{} if !cn.writeDeadline.IsZero() { req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) } ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) defer cancel() if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { // assume zero bytes were sent in this RPC break } n += int(res.GetDataSent()) cn.offset += int64(res.GetDataSent()) } return } func (cn *Conn) Close() error { req := &pb.CloseRequest{ SocketDescriptor: &cn.desc, } res := &pb.CloseReply{} if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { return err } cn.desc = "CLOSED" return nil } func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { if ap == nil { return nil } switch prot { case pb.CreateSocketRequest_TCP: return &net.TCPAddr{ IP: net.IP(ap.PackedAddress), Port: int(*ap.Port), } case pb.CreateSocketRequest_UDP: return &net.UDPAddr{ IP: net.IP(ap.PackedAddress), Port: int(*ap.Port), } } panic("unknown protocol " + prot.String()) } func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } func (cn *Conn) SetDeadline(t time.Time) error { cn.readDeadline = t cn.writeDeadline = t return nil } func (cn *Conn) SetReadDeadline(t time.Time) error { cn.readDeadline = t return nil } func (cn *Conn) SetWriteDeadline(t time.Time) error { cn.writeDeadline = t return nil } // KeepAlive signals that the connection is still in use. // It may be called to prevent the socket being closed due to inactivity. func (cn *Conn) KeepAlive() error { req := &pb.GetSocketNameRequest{ SocketDescriptor: &cn.desc, } res := &pb.GetSocketNameReply{} return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) } func init() { internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/socket/socket_vm.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
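For reference, a minimal sketch of dialing out through this package; the host, port, and deadlines are hypothetical, and the same code compiles against both the classic implementation above and the Managed VM variant that follows:

```go
package main // illustrative client code

import (
	"time"

	"golang.org/x/net/context"

	"google.golang.org/appengine/socket"
)

func probe(ctx context.Context) error {
	conn, err := socket.DialTimeout(ctx, "tcp", "example.com:80", 10*time.Second)
	if err != nil {
		return err
	}
	defer conn.Close()

	conn.SetDeadline(time.Now().Add(5 * time.Second)) // applies to both reads and writes
	if _, err := conn.Write([]byte("HEAD / HTTP/1.0\r\n\r\n")); err != nil {
		return err
	}
	buf := make([]byte, 512)
	_, err = conn.Read(buf)
	return err
}
```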
// +build !appengine package socket import ( "net" "time" "golang.org/x/net/context" ) // Dial connects to the address addr on the network protocol. // The address format is host:port, where host may be a hostname or an IP address. // Known protocols are "tcp" and "udp". // The returned connection satisfies net.Conn, and is valid while ctx is valid; // if the connection is to be used after ctx becomes invalid, invoke SetContext // with the new context. func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { conn, err := net.Dial(protocol, addr) if err != nil { return nil, err } return &Conn{conn}, nil } // DialTimeout is like Dial but takes a timeout. // The timeout includes name resolution, if required. func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { conn, err := net.DialTimeout(protocol, addr, timeout) if err != nil { return nil, err } return &Conn{conn}, nil } // LookupIP returns the given host's IP addresses. func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { return net.LookupIP(host) } // Conn represents a socket connection. // It implements net.Conn. type Conn struct { net.Conn } // SetContext sets the context that is used by this Conn. // It is usually used only when using a Conn that was created in a different context, // such as when a connection is created during a warmup request but used while // servicing a user request. func (cn *Conn) SetContext(ctx context.Context) { // This function is not required on managed VMs. } // KeepAlive signals that the connection is still in use. // It may be called to prevent the socket being closed due to inactivity. func (cn *Conn) KeepAlive() error { // This function is not required on managed VMs. return nil } ================================================ FILE: vendor/google.golang.org/appengine/taskqueue/taskqueue.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package taskqueue provides a client for App Engine's taskqueue service. Using this service, applications may perform work outside a user's request. A Task may be constructed manually; alternatively, since the most common taskqueue operation is to add a single POST task, NewPOSTTask makes it easy. t := taskqueue.NewPOSTTask("/worker", url.Values{ "key": {key}, }) taskqueue.Add(c, t, "") // add t to the default queue */ package taskqueue import ( "errors" "fmt" "net/http" "net/url" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" dspb "google.golang.org/appengine/internal/datastore" pb "google.golang.org/appengine/internal/taskqueue" ) var ( // ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name. ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added") ) // RetryOptions let you control whether to retry a task and the backoff intervals between tries. type RetryOptions struct { // Number of tries/leases after which the task fails permanently and is deleted. // If AgeLimit is also set, both limits must be exceeded for the task to fail permanently. RetryLimit int32 // Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks). 
// If RetryLimit is also set, both limits must be exceeded for the task to fail permanently. AgeLimit time.Duration // Minimum time between successive tries (only for push tasks). MinBackoff time.Duration // Maximum time between successive tries (only for push tasks). MaxBackoff time.Duration // Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks). MaxDoublings int32 // If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value. // Otherwise a zero MaxDoublings is ignored and the default is used. ApplyZeroMaxDoublings bool } // toRetryParameter converts RetryOptions to pb.TaskQueueRetryParameters. func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters { params := &pb.TaskQueueRetryParameters{} if opt.RetryLimit > 0 { params.RetryLimit = proto.Int32(opt.RetryLimit) } if opt.AgeLimit > 0 { params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds())) } if opt.MinBackoff > 0 { params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds()) } if opt.MaxBackoff > 0 { params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds()) } if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) { params.MaxDoublings = proto.Int32(opt.MaxDoublings) } return params } // A Task represents a task to be executed. type Task struct { // Path is the worker URL for the task. // If unset, it will default to /_ah/queue/. Path string // Payload is the data for the task. // This will be delivered as the HTTP request body. // It is only used when Method is POST, PUT or PULL. // url.Values' Encode method may be used to generate this for POST requests. Payload []byte // Additional HTTP headers to pass at the task's execution time. // To schedule the task to be run with an alternate app version // or backend, set the "Host" header. Header http.Header // Method is the HTTP method for the task ("GET", "POST", etc.), // or "PULL" if this is task is destined for a pull-based queue. // If empty, this defaults to "POST". Method string // A name for the task. // If empty, a name will be chosen. Name string // Delay specifies the duration the task queue service must wait // before executing the task. // Either Delay or ETA may be set, but not both. Delay time.Duration // ETA specifies the earliest time a task may be executed (push queues) // or leased (pull queues). // Either Delay or ETA may be set, but not both. ETA time.Time // The number of times the task has been dispatched or leased. RetryCount int32 // Tag for the task. Only used when Method is PULL. Tag string // Retry options for this task. May be nil. RetryOptions *RetryOptions } func (t *Task) method() string { if t.Method == "" { return "POST" } return t.Method } // NewPOSTTask creates a Task that will POST to a path with the given form data. 
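A sketch of constructing a push task from the fields defined above; the worker path, payload, and retry numbers are hypothetical:

```go
package main // illustrative client code

import (
	"net/http"
	"net/url"
	"time"

	"google.golang.org/appengine/taskqueue"
)

func newResizeTask(imageID string) *taskqueue.Task {
	return &taskqueue.Task{
		Path:    "/worker/resize",
		Payload: []byte(url.Values{"image": {imageID}}.Encode()),
		Header:  http.Header{"Content-Type": {"application/x-www-form-urlencoded"}},
		Method:  "POST",
		Delay:   30 * time.Second, // set either Delay or ETA, never both
		RetryOptions: &taskqueue.RetryOptions{
			RetryLimit: 5,
			MinBackoff: 10 * time.Second,
			MaxBackoff: 5 * time.Minute,
		},
	}
}
```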
func NewPOSTTask(path string, params url.Values) *Task { h := make(http.Header) h.Set("Content-Type", "application/x-www-form-urlencoded") return &Task{ Path: path, Payload: []byte(params.Encode()), Header: h, Method: "POST", } } var ( currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace") ) func getDefaultNamespace(ctx context.Context) string { return internal.IncomingHeaders(ctx).Get(defaultNamespace) } func newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) { if queueName == "" { queueName = "default" } path := task.Path if path == "" { path = "/_ah/queue/" + queueName } eta := task.ETA if eta.IsZero() { eta = time.Now().Add(task.Delay) } else if task.Delay != 0 { panic("taskqueue: both Delay and ETA are set") } req := &pb.TaskQueueAddRequest{ QueueName: []byte(queueName), TaskName: []byte(task.Name), EtaUsec: proto.Int64(eta.UnixNano() / 1e3), } method := task.method() if method == "PULL" { // Pull-based task req.Body = task.Payload req.Mode = pb.TaskQueueMode_PULL.Enum() if task.Tag != "" { req.Tag = []byte(task.Tag) } } else { // HTTP-based task if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok { req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum() } else { return nil, fmt.Errorf("taskqueue: bad method %q", method) } req.Url = []byte(path) for k, vs := range task.Header { for _, v := range vs { req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ Key: []byte(k), Value: []byte(v), }) } } if method == "POST" || method == "PUT" { req.Body = task.Payload } // Namespace headers. if _, ok := task.Header[currentNamespace]; !ok { // Fetch the current namespace of this request. ns := internal.NamespaceFromContext(c) req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ Key: []byte(currentNamespace), Value: []byte(ns), }) } if _, ok := task.Header[defaultNamespace]; !ok { // Fetch the X-AppEngine-Default-Namespace header of this request. if ns := getDefaultNamespace(c); ns != "" { req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ Key: []byte(defaultNamespace), Value: []byte(ns), }) } } } if task.RetryOptions != nil { req.RetryParameters = task.RetryOptions.toRetryParameters() } return req, nil } var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{ pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true, pb.TaskQueueServiceError_TOMBSTONED_TASK: true, } // Add adds the task to a named queue. // An empty queue name means that the default queue will be used. // Add returns an equivalent Task with defaults filled in, including setting // the task's Name field to the chosen name if the original was empty. func Add(c context.Context, task *Task, queueName string) (*Task, error) { req, err := newAddReq(c, task, queueName) if err != nil { return nil, err } res := &pb.TaskQueueAddResponse{} if err := internal.Call(c, "taskqueue", "Add", req, res); err != nil { apiErr, ok := err.(*internal.APIError) if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] { return nil, ErrTaskAlreadyAdded } return nil, err } resultTask := *task resultTask.Method = task.method() if task.Name == "" { resultTask.Name = string(res.ChosenTaskName) } return &resultTask, nil } // AddMulti adds multiple tasks to a named queue. // An empty queue name means that the default queue will be used. 
// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting // each task's Name field to the chosen name if the original was empty. // If a given task is badly formed or could not be added, an appengine.MultiError is returned. func AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) { req := &pb.TaskQueueBulkAddRequest{ AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)), } me, any := make(appengine.MultiError, len(tasks)), false for i, t := range tasks { req.AddRequest[i], me[i] = newAddReq(c, t, queueName) any = any || me[i] != nil } if any { return nil, me } res := &pb.TaskQueueBulkAddResponse{} if err := internal.Call(c, "taskqueue", "BulkAdd", req, res); err != nil { return nil, err } if len(res.Taskresult) != len(tasks) { return nil, errors.New("taskqueue: server error") } tasksOut := make([]*Task, len(tasks)) for i, tr := range res.Taskresult { tasksOut[i] = new(Task) *tasksOut[i] = *tasks[i] tasksOut[i].Method = tasksOut[i].method() if tasksOut[i].Name == "" { tasksOut[i].Name = string(tr.ChosenTaskName) } if *tr.Result != pb.TaskQueueServiceError_OK { if alreadyAddedErrors[*tr.Result] { me[i] = ErrTaskAlreadyAdded } else { me[i] = &internal.APIError{ Service: "taskqueue", Code: int32(*tr.Result), } } any = true } } if any { return tasksOut, me } return tasksOut, nil } // Delete deletes a task from a named queue. func Delete(c context.Context, task *Task, queueName string) error { err := DeleteMulti(c, []*Task{task}, queueName) if me, ok := err.(appengine.MultiError); ok { return me[0] } return err } // DeleteMulti deletes multiple tasks from a named queue. // If a given task could not be deleted, an appengine.MultiError is returned. func DeleteMulti(c context.Context, tasks []*Task, queueName string) error { taskNames := make([][]byte, len(tasks)) for i, t := range tasks { taskNames[i] = []byte(t.Name) } if queueName == "" { queueName = "default" } req := &pb.TaskQueueDeleteRequest{ QueueName: []byte(queueName), TaskName: taskNames, } res := &pb.TaskQueueDeleteResponse{} if err := internal.Call(c, "taskqueue", "Delete", req, res); err != nil { return err } if a, b := len(req.TaskName), len(res.Result); a != b { return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b) } me, any := make(appengine.MultiError, len(res.Result)), false for i, ec := range res.Result { if ec != pb.TaskQueueServiceError_OK { me[i] = &internal.APIError{ Service: "taskqueue", Code: int32(ec), } any = true } } if any { return me } return nil } func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) { if queueName == "" { queueName = "default" } req := &pb.TaskQueueQueryAndOwnTasksRequest{ QueueName: []byte(queueName), LeaseSeconds: proto.Float64(float64(leaseTime)), MaxTasks: proto.Int64(int64(maxTasks)), GroupByTag: proto.Bool(groupByTag), Tag: tag, } res := &pb.TaskQueueQueryAndOwnTasksResponse{} if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil { return nil, err } tasks := make([]*Task, len(res.Task)) for i, t := range res.Task { tasks[i] = &Task{ Payload: t.Body, Name: string(t.TaskName), Method: "PULL", ETA: time.Unix(0, *t.EtaUsec*1e3), RetryCount: *t.RetryCount, Tag: string(t.Tag), } } return tasks, nil } // Lease leases tasks from a queue. // leaseTime is in seconds. // The number of tasks fetched will be at most maxTasks. 
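A sketch of batch-adding named tasks and distinguishing duplicates from real failures via the appengine.MultiError that AddMulti can return; the handler path and task names are hypothetical:

```go
package main // illustrative client code

import (
	"net/url"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/taskqueue"
)

func enqueueAll(ctx context.Context, ids []string) error {
	tasks := make([]*taskqueue.Task, len(ids))
	for i, id := range ids {
		tasks[i] = taskqueue.NewPOSTTask("/worker", url.Values{"id": {id}})
		tasks[i].Name = "process-" + id // named tasks make duplicate adds detectable
	}
	_, err := taskqueue.AddMulti(ctx, tasks, "") // "" selects the default queue
	me, ok := err.(appengine.MultiError)
	if !ok {
		return err // nil, or a queue-wide failure
	}
	for _, e := range me {
		if e == nil || e == taskqueue.ErrTaskAlreadyAdded {
			continue // duplicates are fine; the work is already queued
		}
		return e
	}
	return nil
}
```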
func Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) { return lease(c, maxTasks, queueName, leaseTime, false, nil) } // LeaseByTag leases tasks from a queue, grouped by tag. // If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA. // leaseTime is in seconds. // The number of tasks fetched will be at most maxTasks. func LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) { return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag)) } // Purge removes all tasks from a queue. func Purge(c context.Context, queueName string) error { if queueName == "" { queueName = "default" } req := &pb.TaskQueuePurgeQueueRequest{ QueueName: []byte(queueName), } res := &pb.TaskQueuePurgeQueueResponse{} return internal.Call(c, "taskqueue", "PurgeQueue", req, res) } // ModifyLease modifies the lease of a task. // Used to request more processing time, or to abandon processing. // leaseTime is in seconds and must not be negative. func ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error { if queueName == "" { queueName = "default" } req := &pb.TaskQueueModifyTaskLeaseRequest{ QueueName: []byte(queueName), TaskName: []byte(task.Name), EtaUsec: proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership. LeaseSeconds: proto.Float64(float64(leaseTime)), } res := &pb.TaskQueueModifyTaskLeaseResponse{} if err := internal.Call(c, "taskqueue", "ModifyTaskLease", req, res); err != nil { return err } task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3) return nil } // QueueStatistics represents statistics about a single task queue. type QueueStatistics struct { Tasks int // may be an approximation OldestETA time.Time // zero if there are no pending tasks Executed1Minute int // tasks executed in the last minute InFlight int // tasks executing now EnforcedRate float64 // requests per second } // QueueStats retrieves statistics about queues. func QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) { req := &pb.TaskQueueFetchQueueStatsRequest{ QueueName: make([][]byte, len(queueNames)), } for i, q := range queueNames { if q == "" { q = "default" } req.QueueName[i] = []byte(q) } res := &pb.TaskQueueFetchQueueStatsResponse{} if err := internal.Call(c, "taskqueue", "FetchQueueStats", req, res); err != nil { return nil, err } qs := make([]QueueStatistics, len(res.Queuestats)) for i, qsg := range res.Queuestats { qs[i] = QueueStatistics{ Tasks: int(*qsg.NumTasks), } if eta := *qsg.OldestEtaUsec; eta > -1 { qs[i].OldestETA = time.Unix(0, eta*1e3) } if si := qsg.ScannerInfo; si != nil { qs[i].Executed1Minute = int(*si.ExecutedLastMinute) qs[i].InFlight = int(si.GetRequestsInFlight()) qs[i].EnforcedRate = si.GetEnforcedRate() } } return qs, nil } func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) { x.Transaction = t } func init() { internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name) // Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue. dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT) internal.RegisterTimeoutErrorCode("taskqueue", dsCode) // Transaction registration. 
internal.RegisterTransactionSetter(setTransaction) internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) { for _, req := range x.AddRequest { setTransaction(req, t) } }) } ================================================ FILE: vendor/google.golang.org/appengine/timeout.go ================================================ // Copyright 2013 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package appengine import "golang.org/x/net/context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { if err == context.DeadlineExceeded { return true } if t, ok := err.(interface { IsTimeout() bool }); ok { return t.IsTimeout() } return false } ================================================ FILE: vendor/google.golang.org/appengine/urlfetch/urlfetch.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package urlfetch provides an http.RoundTripper implementation // for fetching URLs via App Engine's urlfetch service. package urlfetch import ( "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "strconv" "strings" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" ) // Transport is an implementation of http.RoundTripper for // App Engine. Users should generally create an http.Client using // this transport and use the Client rather than using this transport // directly. type Transport struct { Context context.Context // Controls whether the application checks the validity of SSL certificates // over HTTPS connections. A value of false (the default) instructs the // application to send a request to the server only if the certificate is // valid and signed by a trusted certificate authority (CA), and also // includes a hostname that matches the certificate. A value of true // instructs the application to perform no certificate validation. AllowInvalidServerCertificate bool } // Verify statically that *Transport implements http.RoundTripper. var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This // client will have the default deadline of 5 seconds, and will check the // validity of SSL certificates. // // Any deadline of the provided context will be used for requests through this client; // if the client does not have a deadline then a 5 second default is used. func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ Context: ctx, }, } } type bodyReader struct { content []byte truncated bool closed bool } // ErrTruncatedBody is the error returned after the final Read() from a // response's Body if the body has been truncated by App Engine's proxy. 
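A sketch of the intended use of Client inside a request handler, with a deadline carried on the context; the target URL is hypothetical:

```go
package main // illustrative client code

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/urlfetch"
)

func proxyHandler(w http.ResponseWriter, r *http.Request) {
	// A deadline on the context overrides the transport's 5-second default.
	ctx, cancel := context.WithTimeout(appengine.NewContext(r), 30*time.Second)
	defer cancel()

	client := urlfetch.Client(ctx)
	resp, err := client.Get("https://www.example.com/")
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()
	fmt.Fprintf(w, "upstream returned HTTP %d", resp.StatusCode)
}
```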
var ErrTruncatedBody = errors.New("urlfetch: truncated body") func statusCodeToText(code int) string { if t := http.StatusText(code); t != "" { return t } return strconv.Itoa(code) } func (br *bodyReader) Read(p []byte) (n int, err error) { if br.closed { if br.truncated { return 0, ErrTruncatedBody } return 0, io.EOF } n = copy(p, br.content) if n > 0 { br.content = br.content[n:] return } if br.truncated { br.closed = true return 0, ErrTruncatedBody } return 0, io.EOF } func (br *bodyReader) Close() error { br.closed = true br.content = nil return nil } // A map of the URL Fetch-accepted methods that take a request body. var methodAcceptsRequestBody = map[string]bool{ "POST": true, "PUT": true, "PATCH": true, } // urlString returns a valid string given a URL. This function is necessary because // the String method of URL doesn't correctly handle URLs with non-empty Opaque values. // See http://code.google.com/p/go/issues/detail?id=4860. func urlString(u *url.URL) string { if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { return u.String() } aux := *u aux.Opaque = "//" + aux.Host + aux.Opaque return aux.String() } // RoundTrip issues a single HTTP request and returns its response. Per the // http.RoundTripper interface, RoundTrip only returns an error if there // was an unsupported request or the URL Fetch proxy fails. // Note that HTTP response codes such as 5xx, 403, 404, etc are not // errors as far as the transport is concerned and will be returned // with err set to nil. func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] if !ok { return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) } method := pb.URLFetchRequest_RequestMethod(methNum) freq := &pb.URLFetchRequest{ Method: &method, Url: proto.String(urlString(req.URL)), FollowRedirects: proto.Bool(false), // http.Client's responsibility MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), } if deadline, ok := t.Context.Deadline(); ok { freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) } for k, vals := range req.Header { for _, val := range vals { freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ Key: proto.String(k), Value: proto.String(val), }) } } if methodAcceptsRequestBody[req.Method] && req.Body != nil { // Avoid a []byte copy if req.Body has a Bytes method. switch b := req.Body.(type) { case interface { Bytes() []byte }: freq.Payload = b.Bytes() default: freq.Payload, err = ioutil.ReadAll(req.Body) if err != nil { return nil, err } } } fres := &pb.URLFetchResponse{} if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { return nil, err } res = &http.Response{} res.StatusCode = int(*fres.StatusCode) res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) res.Header = make(http.Header) res.Request = req // Faked: res.ProtoMajor = 1 res.ProtoMinor = 1 res.Proto = "HTTP/1.1" res.Close = true for _, h := range fres.Header { hkey := http.CanonicalHeaderKey(*h.Key) hval := *h.Value if hkey == "Content-Length" { // Will get filled in below for all but HEAD requests. 
if req.Method == "HEAD" { res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) } continue } res.Header.Add(hkey, hval) } if req.Method != "HEAD" { res.ContentLength = int64(len(fres.Content)) } truncated := fres.GetContentWasTruncated() res.Body = &bodyReader{content: fres.Content, truncated: truncated} return } func init() { internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) } ================================================ FILE: vendor/google.golang.org/appengine/user/oauth.go ================================================ // Copyright 2012 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package user import ( "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/user" ) // CurrentOAuth returns the user associated with the OAuth consumer making this // request. If the OAuth consumer did not make a valid OAuth request, or the // scopes is non-empty and the current user does not have at least one of the // scopes, this method will return an error. func CurrentOAuth(c context.Context, scopes ...string) (*User, error) { req := &pb.GetOAuthUserRequest{} if len(scopes) != 1 || scopes[0] != "" { // The signature for this function used to be CurrentOAuth(Context, string). // Ignore the singular "" scope to preserve existing behavior. req.Scopes = scopes } res := &pb.GetOAuthUserResponse{} err := internal.Call(c, "user", "GetOAuthUser", req, res) if err != nil { return nil, err } return &User{ Email: *res.Email, AuthDomain: *res.AuthDomain, Admin: res.GetIsAdmin(), ID: *res.UserId, ClientID: res.GetClientId(), }, nil } // OAuthConsumerKey returns the OAuth consumer key provided with the current // request. This method will return an error if the OAuth request was invalid. func OAuthConsumerKey(c context.Context) (string, error) { req := &pb.CheckOAuthSignatureRequest{} res := &pb.CheckOAuthSignatureResponse{} err := internal.Call(c, "user", "CheckOAuthSignature", req, res) if err != nil { return "", err } return *res.OauthConsumerKey, err } ================================================ FILE: vendor/google.golang.org/appengine/user/user.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // Package user provides a client for App Engine's user authentication service. package user import ( "strings" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/user" ) // User represents a user of the application. type User struct { Email string AuthDomain string Admin bool // ID is the unique permanent ID of the user. // It is populated if the Email is associated // with a Google account, or empty otherwise. ID string // ClientID is the ID of the pre-registered client so its identity can be verified. // See https://developers.google.com/console/help/#generatingoauth2 for more information. ClientID string FederatedIdentity string FederatedProvider string } // String returns a displayable name for the user. 
func (u *User) String() string { if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) { return u.Email[:len(u.Email)-len("@"+u.AuthDomain)] } if u.FederatedIdentity != "" { return u.FederatedIdentity } return u.Email } // LoginURL returns a URL that, when visited, prompts the user to sign in, // then redirects the user to the URL specified by dest. func LoginURL(c context.Context, dest string) (string, error) { return LoginURLFederated(c, dest, "") } // LoginURLFederated is like LoginURL but accepts a user's OpenID identifier. func LoginURLFederated(c context.Context, dest, identity string) (string, error) { req := &pb.CreateLoginURLRequest{ DestinationUrl: proto.String(dest), } if identity != "" { req.FederatedIdentity = proto.String(identity) } res := &pb.CreateLoginURLResponse{} if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil { return "", err } return *res.LoginUrl, nil } // LogoutURL returns a URL that, when visited, signs the user out, // then redirects the user to the URL specified by dest. func LogoutURL(c context.Context, dest string) (string, error) { req := &pb.CreateLogoutURLRequest{ DestinationUrl: proto.String(dest), } res := &pb.CreateLogoutURLResponse{} if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil { return "", err } return *res.LogoutUrl, nil } func init() { internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/appengine/user/user_classic.go ================================================ // Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build appengine package user import ( "appengine/user" "golang.org/x/net/context" "google.golang.org/appengine/internal" ) func Current(ctx context.Context) *User { u := user.Current(internal.ClassicContextFromContext(ctx)) if u == nil { return nil } // Map appengine/user.User to this package's User type. return &User{ Email: u.Email, AuthDomain: u.AuthDomain, Admin: u.Admin, ID: u.ID, FederatedIdentity: u.FederatedIdentity, FederatedProvider: u.FederatedProvider, } } func IsAdmin(ctx context.Context) bool { return user.IsAdmin(internal.ClassicContextFromContext(ctx)) } ================================================ FILE: vendor/google.golang.org/appengine/user/user_vm.go ================================================ // Copyright 2014 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. // +build !appengine package user import ( "golang.org/x/net/context" "google.golang.org/appengine/internal" ) // Current returns the currently logged-in user, // or nil if the user is not signed in. func Current(c context.Context) *User { h := internal.IncomingHeaders(c) u := &User{ Email: h.Get("X-AppEngine-User-Email"), AuthDomain: h.Get("X-AppEngine-Auth-Domain"), ID: h.Get("X-AppEngine-User-Id"), Admin: h.Get("X-AppEngine-User-Is-Admin") == "1", FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"), FederatedProvider: h.Get("X-AppEngine-Federated-Provider"), } if u.Email == "" && u.FederatedIdentity == "" { return nil } return u } // IsAdmin returns true if the current user is signed in and // is currently registered as an administrator of the application. 
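A sketch of the usual sign-in flow built from Current, LoginURL, and IsAdmin; the handler itself is hypothetical:

```go
package main // illustrative client code

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/user"
)

func welcome(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		loginURL, err := user.LoginURL(ctx, r.URL.String())
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		http.Redirect(w, r, loginURL, http.StatusFound)
		return
	}
	fmt.Fprintf(w, "Hello, %s (admin: %v)", u, user.IsAdmin(ctx))
}
```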
func IsAdmin(c context.Context) bool { h := internal.IncomingHeaders(c) return h.Get("X-AppEngine-User-Is-Admin") == "1" } ================================================ FILE: vendor/google.golang.org/appengine/xmpp/xmpp.go ================================================ // Copyright 2011 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. /* Package xmpp provides the means to send and receive instant messages to and from users of XMPP-compatible services. To send a message, m := &xmpp.Message{ To: []string{"kaylee@example.com"}, Body: `Hi! How's the carrot?`, } err := m.Send(c) To receive messages, func init() { xmpp.Handle(handleChat) } func handleChat(c context.Context, m *xmpp.Message) { // ... } */ package xmpp import ( "errors" "fmt" "net/http" "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/xmpp" ) // Message represents an incoming chat message. type Message struct { // Sender is the JID of the sender. // Optional for outgoing messages. Sender string // To is the intended recipients of the message. // Incoming messages will have exactly one element. To []string // Body is the body of the message. Body string // Type is the message type, per RFC 3921. // It defaults to "chat". Type string // RawXML is whether the body contains raw XML. RawXML bool } // Presence represents an outgoing presence update. type Presence struct { // Sender is the JID (optional). Sender string // The intended recipient of the presence update. To string // Type, per RFC 3921 (optional). Defaults to "available". Type string // State of presence (optional). // Valid values: "away", "chat", "xa", "dnd" (RFC 3921). State string // Free text status message (optional). Status string } var ( ErrPresenceUnavailable = errors.New("xmpp: presence unavailable") ErrInvalidJID = errors.New("xmpp: invalid JID") ) // Handle arranges for f to be called for incoming XMPP messages. // Only messages of type "chat" or "normal" will be handled. func Handle(f func(c context.Context, m *Message)) { http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) { f(appengine.NewContext(r), &Message{ Sender: r.FormValue("from"), To: []string{r.FormValue("to")}, Body: r.FormValue("body"), }) }) } // Send sends a message. // If any failures occur with specific recipients, the error will be an appengine.MultiError. func (m *Message) Send(c context.Context) error { req := &pb.XmppMessageRequest{ Jid: m.To, Body: &m.Body, RawXml: &m.RawXML, } if m.Type != "" && m.Type != "chat" { req.Type = &m.Type } if m.Sender != "" { req.FromJid = &m.Sender } res := &pb.XmppMessageResponse{} if err := internal.Call(c, "xmpp", "SendMessage", req, res); err != nil { return err } if len(res.Status) != len(req.Jid) { return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status)) } me, any := make(appengine.MultiError, len(req.Jid)), false for i, st := range res.Status { if st != pb.XmppMessageResponse_NO_ERROR { me[i] = errors.New(st.String()) any = true } } if any { return me } return nil } // Invite sends an invitation. If the from address is an empty string // the default (yourapp@appspot.com/bot) will be used. 
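A sketch of sending a presence update and then querying a contact's presence with the functions defined just below; the JID is hypothetical:

```go
package main // illustrative client code

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/xmpp"
)

func shareAndCheckPresence(ctx context.Context, buddy string) (string, error) {
	p := &xmpp.Presence{
		To:     buddy, // e.g. "kaylee@example.com"
		Status: "Ready for messages",
	}
	if err := p.Send(ctx); err != nil {
		return "", err
	}
	state, err := xmpp.GetPresence(ctx, buddy, "")
	if err == xmpp.ErrPresenceUnavailable {
		return "", nil // buddy is offline or has not shared presence with the app
	}
	return state, err // "", "away", "dnd", "chat", or "xa"
}
```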
func Invite(c context.Context, to, from string) error { req := &pb.XmppInviteRequest{ Jid: &to, } if from != "" { req.FromJid = &from } res := &pb.XmppInviteResponse{} return internal.Call(c, "xmpp", "SendInvite", req, res) } // Send sends a presence update. func (p *Presence) Send(c context.Context) error { req := &pb.XmppSendPresenceRequest{ Jid: &p.To, } if p.State != "" { req.Show = &p.State } if p.Type != "" { req.Type = &p.Type } if p.Sender != "" { req.FromJid = &p.Sender } if p.Status != "" { req.Status = &p.Status } res := &pb.XmppSendPresenceResponse{} return internal.Call(c, "xmpp", "SendPresence", req, res) } var presenceMap = map[pb.PresenceResponse_SHOW]string{ pb.PresenceResponse_NORMAL: "", pb.PresenceResponse_AWAY: "away", pb.PresenceResponse_DO_NOT_DISTURB: "dnd", pb.PresenceResponse_CHAT: "chat", pb.PresenceResponse_EXTENDED_AWAY: "xa", } // GetPresence retrieves a user's presence. // If the from address is an empty string the default // (yourapp@appspot.com/bot) will be used. // Possible return values are "", "away", "dnd", "chat", "xa". // ErrPresenceUnavailable is returned if the presence is unavailable. func GetPresence(c context.Context, to string, from string) (string, error) { req := &pb.PresenceRequest{ Jid: &to, } if from != "" { req.FromJid = &from } res := &pb.PresenceResponse{} if err := internal.Call(c, "xmpp", "GetPresence", req, res); err != nil { return "", err } if !*res.IsAvailable || res.Presence == nil { return "", ErrPresenceUnavailable } presence, ok := presenceMap[*res.Presence] if ok { return presence, nil } return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence) } // GetPresenceMulti retrieves multiple users' presence. // If the from address is an empty string the default // (yourapp@appspot.com/bot) will be used. // Possible return values are "", "away", "dnd", "chat", "xa". // If any presence is unavailable, an appengine.MultiError is returned func GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) { req := &pb.BulkPresenceRequest{ Jid: to, } if from != "" { req.FromJid = &from } res := &pb.BulkPresenceResponse{} if err := internal.Call(c, "xmpp", "BulkGetPresence", req, res); err != nil { return nil, err } presences := make([]string, 0, len(res.PresenceResponse)) errs := appengine.MultiError{} addResult := func(presence string, err error) { presences = append(presences, presence) errs = append(errs, err) } anyErr := false for _, subres := range res.PresenceResponse { if !subres.GetValid() { anyErr = true addResult("", ErrInvalidJID) continue } if !*subres.IsAvailable || subres.Presence == nil { anyErr = true addResult("", ErrPresenceUnavailable) continue } presence, ok := presenceMap[*subres.Presence] if ok { addResult(presence, nil) } else { anyErr = true addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence)) } } if anyErr { return presences, errs } return presences, nil } func init() { internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name) } ================================================ FILE: vendor/google.golang.org/cloud/.travis.yml ================================================ language: go go: - 1.4 - tip install: - go get -v google.golang.org/cloud/... script: - openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d - GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" go test -v -tags=integration google.golang.org/cloud/... 
================================================ FILE: vendor/google.golang.org/cloud/AUTHORS ================================================ # This is the official list of cloud authors for copyright purposes. # This file is distinct from the CONTRIBUTORS files. # See the latter for an explanation. # Names should be added to this file as: # Name or Organization # The email address is not required for organizations. Google Inc. Palm Stone Games, Inc. Péter Szilágyi Tyler Treat ================================================ FILE: vendor/google.golang.org/cloud/CONTRIBUTING.md ================================================ # Contributing 1. Sign one of the contributor license agreements below. 1. `go get golang.org/x/review/git-review` to install the code reviewing tool. 1. Get the cloud package by running `go get -d google.golang.org/cloud`. If you've already got the package, make sure that the remote git origin is https://code.googlesource.com/gocloud. `git remote set-url origin https://code.googlesource.com/gocloud` 1. Make changes and create a change by running `review change `, provide a commit message, and use `review mail` to create a Gerrit CL. 1. Keep amending the change and mail it as you receive feedback. ## Integration Tests In addition to the unit tests, you may run the integration test suite. To run the integration tests, you must create and configure a project in the Google Developers Console. Once you create a project, set the following environment variables to be able to run the tests against the actual APIs. - **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) - **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**. The storage integration test will create and delete some objects in this bucket. Install the [gcloud command-line tool][gcloudcli] on your machine and use it to create the indexes used in the datastore integration tests with indexes found in `datastore/testdata/index.yaml`: From the project's root directory: ``` sh # Install the app component $ gcloud components update app # Set the default project in your env $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID # Authenticate the gcloud tool with your account $ gcloud auth login # Create the indexes $ gcloud preview datastore create-indexes datastore/testdata ``` You can run the integration tests by running: ``` sh $ go test -v -tags=integration google.golang.org/cloud/... ``` ## Contributor License Agreements Before we can accept your pull requests you'll need to sign a Contributor License Agreement (CLA): - **If you are an individual writing original source code** and **you own the intellectual property**, then you'll need to sign an [individual CLA][indvcla]. - **If you work for a company that wants to allow you to contribute your work**, then you'll need to sign a [corporate CLA][corpcla]. You can sign these electronically (just scroll to the bottom). After that, we'll be able to accept your pull requests. [gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ [indvcla]: https://developers.google.com/open-source/cla/individual [corpcla]: https://developers.google.com/open-source/cla/corporate ================================================ FILE: vendor/google.golang.org/cloud/CONTRIBUTORS ================================================ # People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file # lists people. For example, Google employees are listed here # but not in AUTHORS, because Google holds the copyright. # # https://developers.google.com/open-source/cla/individual # https://developers.google.com/open-source/cla/corporate # # Names should be added to this file as: # Name # Keep the list alphabetically sorted. Andrew Gerrand Brad Fitzpatrick Burcu Dogan Dave Day David Symonds Glenn Lewis Johan Euphrosine Luna Duclos Michael McGreevy Péter Szilágyi Tyler Treat ================================================ FILE: vendor/google.golang.org/cloud/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2014 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: vendor/google.golang.org/cloud/README.md ================================================ # Google Cloud for Go [![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang) **NOTE:** These packages are experimental, and may occasionally make backwards-incompatible changes. **NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). Go packages for Google Cloud Platform services. Supported APIs include: * Google Cloud Datastore * Google Cloud Storage * Google Cloud Pub/Sub * Google Cloud Container Engine ``` go import "google.golang.org/cloud" ``` Documentation and examples are available at [https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud). ## Authorization Authorization, throughout the package, is delegated to the godoc.org/golang.org/x/oauth2. Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2) for examples on using oauth2 with the Cloud package. ## Google Cloud Datastore [Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully managed, schemaless database for storing non-relational data. Cloud Datastore automatically scales with your users and supports ACID transactions, high availability of reads and writes, strong consistency for reads and ancestor queries, and eventual consistency for all other queries. Follow the [activation instructions][cloud-datastore-activation] to use the Google Cloud Datastore API with your project. [https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore) ```go type Post struct { Title string Body string `datastore:",noindex"` PublishedAt time.Time } keys := []*datastore.Key{ datastore.NewKey(ctx, "Post", "post1", 0, nil), datastore.NewKey(ctx, "Post", "post2", 0, nil), } posts := []*Post{ {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, } if _, err := datastore.PutMulti(ctx, keys, posts); err != nil { log.Println(err) } ``` ## Google Cloud Storage [Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store data on Google infrastructure with very high reliability, performance and availability, and can be used to distribute large data objects to users via direct download. [https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage) ```go // Read the object1 from bucket. 
rc, err := storage.NewReader(ctx, "bucket", "object1") if err != nil { log.Fatal(err) } slurp, err := ioutil.ReadAll(rc) rc.Close() if err != nil { log.Fatal(err) } ``` ## Google Cloud Pub/Sub (Alpha) > Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in > backward-incompatible ways and is not recommended for production use. It is not > subject to any SLA or deprecation policy. [Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect your services with reliable, many-to-many, asynchronous messaging hosted on Google's infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation for building your own robust, global services. [https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub) ```go // Publish "hello world" on topic1. msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ Data: []byte("hello world"), }) if err != nil { log.Println(err) } // Pull messages via subscription1. msgs, err := pubsub.Pull(ctx, "subscription1", 1) if err != nil { log.Println(err) } ``` ## Contributing Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md) document for details. We're using Gerrit for our code reviews. Please don't open pull requests against this repo, new pull requests will be automatically closed. [cloud-datastore]: https://cloud.google.com/datastore/ [cloud-datastore-docs]: https://cloud.google.com/datastore/docs [cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate [cloud-pubsub]: https://cloud.google.com/pubsub/ [cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs [cloud-storage]: https://cloud.google.com/storage/ [cloud-storage-docs]: https://cloud.google.com/storage/docs/overview [cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets ================================================ FILE: vendor/google.golang.org/cloud/bigquery/bigquery.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery // TODO(mcgreevy): support dry-run mode when creating jobs. import ( "fmt" "net/http" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // A Source is a source of data for the Copy function. type Source interface { implementsSource() } // A Destination is a destination of data for the Copy function. type Destination interface { implementsDestination() } // An Option is an optional argument to Copy. type Option interface { implementsOption() } // A ReadSource is a source of data for the Read function. type ReadSource interface { implementsReadSource() } // A ReadOption is an optional argument to Read. type ReadOption interface { customizeRead(conf *pagingConf) } const Scope = "https://www.googleapis.com/auth/bigquery" // Client may be used to perform BigQuery operations. 
type Client struct { service service projectID string } // Note: many of the methods on *Client appear in the various *_op.go source files. // NewClient constructs a new Client which can perform BigQuery operations. // Operations performed via the client are billed to the specified GCP project. // The supplied http.Client is used for making requests to the BigQuery server and must be capable of // authenticating requests with Scope. func NewClient(client *http.Client, projectID string) (*Client, error) { bqService, err := newBigqueryService(client) if err != nil { return nil, fmt.Errorf("constructing bigquery client: %v", err) } c := &Client{ service: bqService, projectID: projectID, } return c, nil } // initJobProto creates and returns a bigquery Job proto. // The proto is customized using any jobOptions in options. // The list of Options is returned with the jobOptions removed. func initJobProto(projectID string, options []Option) (*bq.Job, []Option) { job := &bq.Job{} var other []Option for _, opt := range options { if o, ok := opt.(jobOption); ok { o.customizeJob(job, projectID) } else { other = append(other, opt) } } return job, other } // Copy starts a BigQuery operation to copy data from a Source to a Destination. func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) { switch dst := dst.(type) { case *Table: switch src := src.(type) { case *GCSReference: return c.load(ctx, dst, src, options) case *Table: return c.cp(ctx, dst, Tables{src}, options) case Tables: return c.cp(ctx, dst, src, options) case *Query: return c.query(ctx, dst, src, options) } case *GCSReference: if src, ok := src.(*Table); ok { return c.extract(ctx, dst, src, options) } } return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src) } // Read fetches data from a Source and returns the data via an Iterator. func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) { // TODO(mcgreevy): support Query as a ReadSource. // TODO(mcgreevy): use ctx. switch src := src.(type) { case *Table: return c.readTable(src, options) } return nil, fmt.Errorf("src (%T) does not support the Read operation", src) } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/copy_op.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigquery import ( "fmt" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) type copyOption interface { customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) } func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) { job, options := initJobProto(c.projectID, options) payload := &bq.JobConfigurationTableCopy{} dst.customizeCopyDst(payload, c.projectID) src.customizeCopySrc(payload, c.projectID) for _, opt := range options { o, ok := opt.(copyOption) if !ok { return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) } o.customizeCopy(payload, c.projectID) } job.Configuration = &bq.JobConfiguration{ Copy: payload, } return c.service.insertJob(ctx, job, c.projectID) } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/doc.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package bigquery provides a client for the BigQuery service. // // Note: This package is a work-in-progress. Backwards-incompatible changes should be expected. package bigquery ================================================ FILE: vendor/google.golang.org/cloud/bigquery/error.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" bq "google.golang.org/api/bigquery/v2" ) // An Error contains detailed information about an error encountered while processing a job. type Error struct { // Mirrors bq.ErrorProto, but drops DebugInfo Location, Message, Reason string } func (e Error) Error() string { return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) } func errorFromErrorProto(ep *bq.ErrorProto) *Error { if ep == nil { return nil } return &Error{ Location: ep.Location, Message: ep.Message, Reason: ep.Reason, } } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/extract_op.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) type extractOption interface { customizeExtract(conf *bq.JobConfigurationExtract, projectID string) } // DisableHeader returns an Option that disables the printing of a header row in exported data. func DisableHeader() Option { return disableHeader{} } type disableHeader struct{} func (opt disableHeader) implementsOption() {} func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract, projectID string) { conf.PrintHeader = false } func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) { job, options := initJobProto(c.projectID, options) payload := &bq.JobConfigurationExtract{} dst.customizeExtractDst(payload, c.projectID) src.customizeExtractSrc(payload, c.projectID) for _, opt := range options { o, ok := opt.(extractOption) if !ok { return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) } o.customizeExtract(payload, c.projectID) } job.Configuration = &bq.JobConfiguration{ Extract: payload, } return c.service.insertJob(ctx, job, c.projectID) } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/gcs.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import bq "google.golang.org/api/bigquery/v2" // GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute // an input or output to a BigQuery operation. type GCSReference struct { uris []string // FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data. // The default is ",". FieldDelimiter string // The number of rows at the top of a CSV file that BigQuery will skip when loading the data. SkipLeadingRows int64 // SourceFormat is the format of the GCS data to be loaded into BigQuery. // Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV. SourceFormat DataFormat // Only used when loading data. Encoding Encoding // Quote is the value used to quote data sections in a CSV file. // The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset. // To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true. // Only used when loading data. Quote string ForceZeroQuote bool // DestinationFormat is the format to use when writing exported files. // Allowed values are: CSV, Avro, JSON. 
The default is CSV. // CSV is not supported for tables with nested or repeated fields. DestinationFormat DataFormat // Only used when writing data. Default is None. Compression Compression } func (gcs *GCSReference) implementsSource() {} func (gcs *GCSReference) implementsDestination() {} // NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. // In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. // Data may also be split into mutiple files, if multiple URIs or URIs containing wildcards are provided. // Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. // For more information about the treatment of wildcards and multiple URIs, // see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple func (c *Client) NewGCSReference(uri ...string) *GCSReference { return &GCSReference{uris: uri} } type DataFormat string const ( CSV DataFormat = "CSV" Avro DataFormat = "AVRO" JSON DataFormat = "NEWLINE_DELIMITED_JSON" DatastoreBackup DataFormat = "DATASTORE_BACKUP" ) // Encoding specifies the character encoding of data to be loaded into BigQuery. // See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding // for more details about how this is used. type Encoding string const ( UTF_8 Encoding = "UTF-8" ISO_8859_1 Encoding = "ISO-8859-1" ) // Compression is the type of compression to apply when writing data to Google Cloud Storage. type Compression string const ( None Compression = "NONE" Gzip Compression = "GZIP" ) func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad, projectID string) { conf.SourceUris = gcs.uris conf.SkipLeadingRows = gcs.SkipLeadingRows conf.SourceFormat = string(gcs.SourceFormat) conf.Encoding = string(gcs.Encoding) conf.FieldDelimiter = gcs.FieldDelimiter // TODO(mcgreevy): take into account gcs.ForceZeroQuote once the underlying library supports it. conf.Quote = gcs.Quote } func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract, projectID string) { conf.DestinationUris = gcs.uris conf.Compression = string(gcs.Compression) conf.DestinationFormat = string(gcs.DestinationFormat) conf.FieldDelimiter = gcs.FieldDelimiter } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/iterator.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" "golang.org/x/net/context" ) // Iterator provides access to the result of a BigQuery lookup. // Next must be called before the first call to Get. type Iterator struct { s service // conf contains the information necessary to make the next readTabledata call. // conf is set to nil when there is no more data to be fetched from the server. conf *readTabledataConf rs [][]Value // contains prefetched rows. 
The first element is returned by Get. err error // contains any error encountered during calls to Next. } // Next advances the Iterator to the next row, making that row available // via the Get method. // Next must be called before the first call to Get. // Next returns false when there are no more rows available, either because // the end of the output was reached, or because there was an error (consult // the Err method to determine which). func (it *Iterator) Next(ctx context.Context) bool { if it.err != nil { return false } if len(it.rs) > 0 { it.rs = it.rs[1:] } if len(it.rs) == 0 { it.fetchRows(ctx) } return it.hasCurrentRow() } func (it *Iterator) hasCurrentRow() bool { return it.err == nil && len(it.rs) != 0 } // fetchRows fetches a series of rows from the BigQuery service. // The fetched rows will be returned via subsequent calls to Get. func (it *Iterator) fetchRows(ctx context.Context) { if it.conf == nil { return } // TODO(mcgreevy): refactor to support reads of query results. res, err := it.s.readTabledata(ctx, it.conf) if err != nil { it.err = err return } if res.pageToken == "" { // No more data. it.conf = nil } else { it.conf.paging.pageToken = res.pageToken } it.rs = res.rows } // Err returns the last error encountered by Next, or nil for no error. func (it *Iterator) Err() error { return it.err } // Get loads the current row into dst, which must implement ValueLoader. func (it *Iterator) Get(dst interface{}) error { if !it.hasCurrentRow() { return fmt.Errorf("Get called on iterator with no remaining values") } if dst, ok := dst.(ValueLoader); ok { return dst.Load(it.rs[0]) } return fmt.Errorf("Get called with unsupported argument type") } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/job.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // A Job represents an operation which has been submitted to BigQuery for processing. type Job struct { service service projectID string jobID string } // State is one of a sequence of states that a Job progresses through as it is processed. type State int const ( Pending State = iota Running Done ) // JobStatus contains the current State of a job, and errors encountered while processing that job. type JobStatus struct { State State err error // All errors encountered during the running of the job. // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. Errors []*Error } // jobOption is an Option which modifies a bq.Job proto. // This is used for configuring values that apply to all operations, such as setting a jobReference. type jobOption interface { customizeJob(job *bq.Job, projectID string) } type jobID string // JobID returns an Option that sets the job ID of a BigQuery job. 
// If this Option is not used, a job ID is generated automatically. func JobID(ID string) Option { return jobID(ID) } func (opt jobID) implementsOption() {} func (opt jobID) customizeJob(job *bq.Job, projectID string) { job.JobReference = &bq.JobReference{ JobId: string(opt), ProjectId: projectID, } } // Done reports whether the job has completed. // After Done returns true, the Err method will return an error if the job completed unsuccessfully. func (s *JobStatus) Done() bool { return s.State == Done } // Err returns the error that caused the job to complete unsuccessfully (if any). func (s *JobStatus) Err() error { return s.err } // Status returns the current status of the job. It fails if the Status could not be determined. func (j *Job) Status(ctx context.Context) (*JobStatus, error) { return j.service.jobStatus(ctx, j.projectID, j.jobID) } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/load_op.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) type loadOption interface { customizeLoad(conf *bq.JobConfigurationLoad, projectID string) } // A DestinationSchema must be supplied when loading data from Google Cloud Storage into a non-existent table. // Caveat: DestinationSchema is not required if the data being loaded is a datastore backup. func DestinationSchema(schema Schema) Option { return destSchema(schema) } type destSchema Schema func (opt destSchema) implementsOption() {} func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { var fields []*bq.TableFieldSchema for _, f := range opt { fields = append(fields, f.proto()) } if len(fields) > 0 { conf.Schema = &bq.TableSchema{Fields: fields} } } // MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored. // If this maximum is exceeded, the operation will be unsuccessful. func MaxBadRecords(n int64) Option { return maxBadRecords(n) } type maxBadRecords int64 func (opt maxBadRecords) implementsOption() {} func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { conf.MaxBadRecords = int64(opt) } // AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls. func AllowJaggedRows() Option { return allowJaggedRows{} } type allowJaggedRows struct{} func (opt allowJaggedRows) implementsOption() {} func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { conf.AllowJaggedRows = true } // AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data. 
func AllowQuotedNewlines() Option { return allowQuotedNewlines{} } type allowQuotedNewlines struct{} func (opt allowQuotedNewlines) implementsOption() {} func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { conf.AllowQuotedNewlines = true } // IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated. // Unknown values are ignored. For CSV this ignores extra values at the end of a line. // For JSON this ignores named values that do not match any column name. // If this Option is not used, records containing unknown values are treated as bad records. // The MaxBadRecords Option can be used to customize how bad records are handled. func IgnoreUnknownValues() Option { return ignoreUnknownValues{} } type ignoreUnknownValues struct{} func (opt ignoreUnknownValues) implementsOption() {} func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { conf.IgnoreUnknownValues = true } func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) { job, options := initJobProto(c.projectID, options) payload := &bq.JobConfigurationLoad{} dst.customizeLoadDst(payload, c.projectID) src.customizeLoadSrc(payload, c.projectID) for _, opt := range options { o, ok := opt.(loadOption) if !ok { return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) } o.customizeLoad(payload, c.projectID) } job.Configuration = &bq.JobConfiguration{ Load: payload, } return c.service.insertJob(ctx, job, c.projectID) } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/query.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import bq "google.golang.org/api/bigquery/v2" // Query represents a query to be executed. type Query struct { // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. Q string // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. // If DefaultProjectID is set, DefaultDatasetID must also be set. DefaultProjectID string DefaultDatasetID string } func (q *Query) implementsSource() {} func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery, projectID string) { conf.Query = q.Q if q.DefaultProjectID != "" || q.DefaultDatasetID != "" { conf.DefaultDataset = &bq.DatasetReference{ DatasetId: q.DefaultDatasetID, ProjectId: q.DefaultProjectID, } } } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/query_op.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) type queryOption interface { customizeQuery(conf *bq.JobConfigurationQuery, projectID string) } // UseQueryCache returns an Option that causes results to be fetched from the query cache if they are available. // The query cache is a best-effort cache that is flushed whenever tables in the query are modified. // Cached results are only available when TableID is unspecified in the query's destination Table. // For more information, see https://cloud.google.com/bigquery/querying-data#querycaching func UseQueryCache() Option { return useQueryCache{} } type useQueryCache struct{} func (opt useQueryCache) implementsOption() {} func (opt useQueryCache) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { conf.UseQueryCache = true } // JobPriority returns an Option that causes a query to be scheduled with the specified priority. // The default priority is InteractivePriority. // For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries func JobPriority(priority string) Option { return jobPriority(priority) } type jobPriority string func (opt jobPriority) implementsOption() {} func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { conf.Priority = string(opt) } const ( BatchPriority = "BATCH" InteractivePriority = "INTERACTIVE" ) // TODO(mcgreevy): support large results. // TODO(mcgreevy): support non-flattened results. func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) { job, options := initJobProto(c.projectID, options) payload := &bq.JobConfigurationQuery{} dst.customizeQueryDst(payload, c.projectID) src.customizeQuerySrc(payload, c.projectID) for _, opt := range options { o, ok := opt.(queryOption) if !ok { return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) } o.customizeQuery(payload, c.projectID) } job.Configuration = &bq.JobConfiguration{ Query: payload, } return c.service.insertJob(ctx, job, c.projectID) } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/read_op.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery // RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery. 
func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) } type recordsPerRequest int64 func (opt recordsPerRequest) customizeRead(conf *pagingConf) { conf.recordsPerRequest = int64(opt) conf.setRecordsPerRequest = true } // TODO(mcgreevy): support configurable startIndex and pageToken. func (c *Client) readTable(src *Table, options []ReadOption) (*Iterator, error) { conf := &readTabledataConf{} src.customizeReadSrc(conf) for _, o := range options { o.customizeRead(&conf.paging) } // The iterator takes care of actually fetching the data. it := &Iterator{ conf: conf, s: c.service, } return it, nil } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/schema.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import bq "google.golang.org/api/bigquery/v2" // Schema describes the fields in a table or query result. type Schema []*FieldSchema // TODO(mcgreevy): add a function to generate a schema from a struct. type FieldSchema struct { // The field name. // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), // and must start with a letter or underscore. // The maximum length is 128 characters. Name string // A description of the field. The maximum length is 16,384 characters. Description string // Whether the field may contain multiple values. Repeated bool // Whether the field is required. Ignored if Repeated is true. Required bool // The field data type. If Type is Record, then this field contains a nested schema, // which is described by Schema. Type FieldType // Describes the nested schema if Type is set to Record. Schema Schema } func (fs *FieldSchema) proto() *bq.TableFieldSchema { tfs := &bq.TableFieldSchema{ Description: fs.Description, Name: fs.Name, Type: string(fs.Type), } if fs.Repeated { tfs.Mode = "REPEATED" } else if fs.Required { tfs.Mode = "REQUIRED" } // else leave as default, which is interpreted as NULLABLE. for _, f := range fs.Schema { tfs.Fields = append(tfs.Fields, f.proto()) } return tfs } type FieldType string const ( StringFieldType FieldType = "STRING" IntegerFieldType FieldType = "INTEGER" FloatFieldType FieldType = "FLOAT" BooleanFieldType FieldType = "BOOLEAN" TimestampFieldType FieldType = "TIMESTAMP" RecordFieldType FieldType = "RECORD" ) ================================================ FILE: vendor/google.golang.org/cloud/bigquery/service.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" "net/http" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // service provides an internal abstraction to isolate the generated // BigQuery API; most of this package uses this interface instead. // The single implementation, *bigqueryService, contains all the knowledge // of the generated BigQuery API. type service interface { insertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error) jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error) readTabledata(ctx context.Context, conf *readTabledataConf) (*readTabledataResult, error) } type bigqueryService struct { s *bq.Service } func newBigqueryService(client *http.Client) (*bigqueryService, error) { s, err := bq.New(client) if err != nil { return nil, fmt.Errorf("constructing bigquery client: %v", err) } return &bigqueryService{s: s}, nil } func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) { // TODO(mcgreevy): use ctx res, err := s.s.Jobs.Insert(projectID, job).Do() if err != nil { return nil, err } return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil } type pagingConf struct { pageToken string recordsPerRequest int64 setRecordsPerRequest bool } type readTabledataConf struct { projectID, datasetID, tableID string paging pagingConf } type readTabledataResult struct { pageToken string rows [][]Value totalRows int64 } func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTabledataConf) (*readTabledataResult, error) { list := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID). 
PageToken(conf.paging.pageToken) if conf.paging.setRecordsPerRequest { list = list.MaxResults(conf.paging.recordsPerRequest) } res, err := list.Do() if err != nil { return nil, err } var rs [][]Value for _, r := range res.Rows { rs = append(rs, convertRow(r)) } result := &readTabledataResult{ pageToken: res.PageToken, rows: rs, totalRows: res.TotalRows, } return result, nil } func convertRow(r *bq.TableRow) []Value { var values []Value for _, cell := range r.F { values = append(values, cell.V) } return values } func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { // TODO(mcgreevy): use ctx res, err := s.s.Jobs.Get(projectID, jobID).Do() if err != nil { return nil, err } return jobStatusFromProto(res.Status) } var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done} func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) { state, ok := stateMap[status.State] if !ok { return nil, fmt.Errorf("unexpected job state: %v", status.State) } newStatus := &JobStatus{ State: state, err: nil, } if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil { newStatus.err = err } for _, ep := range status.Errors { newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep)) } return newStatus, nil } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/table.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import bq "google.golang.org/api/bigquery/v2" // A Table is a reference to a BigQuery table. type Table struct { // ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query. // In this case the result will be stored in an ephemeral table. ProjectID string DatasetID string // TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). // The maximum length is 1,024 characters. TableID string // All following fields are optional. CreateDisposition CreateDisposition // default is CreateIfNeeded. WriteDisposition WriteDisposition // default is WriteAppend. } // Tables is a group of tables. The tables may belong to differing projects or datasets. type Tables []*Table // CreateDisposition specifies the circumstances under which destination table will be created. type CreateDisposition string const ( // The table will be created if it does not already exist. Tables are created atomically on successful completion of a job. CreateIfNeeded CreateDisposition = "CREATE_IF_NEEDED" // The table must already exist and will not be automatically created. CreateNever CreateDisposition = "CREATE_NEVER" ) // WriteDisposition specifies how existing data in a destination table is treated. type WriteDisposition string const ( // Data will be appended to any existing data in the destination table. // Data is appended atomically on successful completion of a job. 
WriteAppend WriteDisposition = "WRITE_APPEND" // Existing data in the destination table will be overwritten. // Data is overwritten atomically on successful completion of a job. WriteTruncate WriteDisposition = "WRITE_TRUNCATE" // Writes will fail if the destination table already contains data. WriteEmpty WriteDisposition = "WRITE_EMPTY" ) func (t *Table) implementsSource() {} func (t *Table) implementsReadSource() {} func (t *Table) implementsDestination() {} func (ts Tables) implementsSource() {} func (t *Table) tableRefProto() *bq.TableReference { return &bq.TableReference{ ProjectId: t.ProjectID, DatasetId: t.DatasetID, TableId: t.TableID, } } // implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. func (t *Table) implicitTable() bool { return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" } func (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad, projectID string) { conf.DestinationTable = t.tableRefProto() conf.CreateDisposition = string(t.CreateDisposition) conf.WriteDisposition = string(t.WriteDisposition) } func (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract, projectID string) { conf.SourceTable = t.tableRefProto() } func (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy, projectID string) { conf.DestinationTable = t.tableRefProto() conf.CreateDisposition = string(t.CreateDisposition) conf.WriteDisposition = string(t.WriteDisposition) } func (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy, projectID string) { for _, t := range ts { conf.SourceTables = append(conf.SourceTables, t.tableRefProto()) } } func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery, projectID string) { if !t.implicitTable() { conf.DestinationTable = t.tableRefProto() } conf.CreateDisposition = string(t.CreateDisposition) conf.WriteDisposition = string(t.WriteDisposition) } func (t *Table) customizeReadSrc(conf *readTabledataConf) { conf.projectID = t.ProjectID conf.datasetID = t.DatasetID conf.tableID = t.TableID } ================================================ FILE: vendor/google.golang.org/cloud/bigquery/value.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery // Value stores the contents of a single cell from a BigQuery result. type Value interface{} // ValueLoader stores a slice of Values representing a result row from a Read operation. // See Iterator.Get for more information. type ValueLoader interface { Load(v []Value) error } // ValueList converts a []Value to implement ValueLoader. type ValueList []Value // Load stores a sequence of values in a ValueList. func (vs *ValueList) Load(v []Value) error { *vs = append(*vs, v...) return nil } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/admin.go ================================================ /* Copyright 2015 Google Inc. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "fmt" "strings" "golang.org/x/net/context" "google.golang.org/cloud" btcspb "google.golang.org/cloud/bigtable/internal/cluster_service_proto" bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto" "google.golang.org/grpc" ) const adminAddr = "bigtabletableadmin.googleapis.com:443" // AdminClient is a client type for performing admin operations on a specific cluster. type AdminClient struct { conn *grpc.ClientConn tClient bttspb.BigtableTableServiceClient cClient btcspb.BigtableClusterServiceClient project, zone, cluster string } // NewAdminClient creates a new AdminClient for a given project, zone and cluster. func NewAdminClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*AdminClient, error) { o := []cloud.ClientOption{ cloud.WithEndpoint(adminAddr), cloud.WithScopes(AdminScope), } o = append(o, opts...) conn, err := cloud.DialGRPC(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } return &AdminClient{ conn: conn, tClient: bttspb.NewBigtableTableServiceClient(conn), cClient: btcspb.NewBigtableClusterServiceClient(conn), project: project, zone: zone, cluster: cluster, }, nil } // Close closes the AdminClient. func (ac *AdminClient) Close() { ac.conn.Close() } func (ac *AdminClient) clusterPrefix() string { return fmt.Sprintf("projects/%s/zones/%s/clusters/%s", ac.project, ac.zone, ac.cluster) } // Tables returns a list of the tables in the cluster. func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { prefix := ac.clusterPrefix() req := &bttspb.ListTablesRequest{ Name: prefix, } res, err := ac.tClient.ListTables(ctx, req) if err != nil { return nil, err } names := make([]string, 0, len(res.Tables)) for _, tbl := range res.Tables { names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) } return names, nil } // CreateTable creates a new table in the cluster. // This method may return before the table's creation is complete. func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { prefix := ac.clusterPrefix() req := &bttspb.CreateTableRequest{ Name: prefix, TableId: table, } _, err := ac.tClient.CreateTable(ctx, req) if err != nil { return err } return nil } // CreateColumnFamily creates a new column family in a table. func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error { // TODO(dsymonds): Permit specifying gcexpr and any other family settings. prefix := ac.clusterPrefix() req := &bttspb.CreateColumnFamilyRequest{ Name: prefix + "/tables/" + table, ColumnFamilyId: family, } _, err := ac.tClient.CreateColumnFamily(ctx, req) return err } // DeleteTable deletes a table and all of its data. 
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error { prefix := ac.clusterPrefix() req := &bttspb.DeleteTableRequest{ Name: prefix + "/tables/" + table, } _, err := ac.tClient.DeleteTable(ctx, req) return err } // DeleteColumnFamily deletes a column family in a table and all of its data. func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error { prefix := ac.clusterPrefix() req := &bttspb.DeleteColumnFamilyRequest{ Name: prefix + "/tables/" + table + "/columnFamilies/" + family, } _, err := ac.tClient.DeleteColumnFamily(ctx, req) return err } // TableInfo represents information about a table. type TableInfo struct { Families []string } // TableInfo retrieves information about a table. func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) { prefix := ac.clusterPrefix() req := &bttspb.GetTableRequest{ Name: prefix + "/tables/" + table, } res, err := ac.tClient.GetTable(ctx, req) if err != nil { return nil, err } ti := &TableInfo{} for fam := range res.ColumnFamilies { ti.Families = append(ti.Families, fam) } return ti, nil } // SetClusterSize sets the number of server nodes for this cluster. func (ac *AdminClient) SetClusterSize(ctx context.Context, nodes int) error { req := &btcspb.GetClusterRequest{ Name: ac.clusterPrefix(), } clu, err := ac.cClient.GetCluster(ctx, req) if err != nil { return err } clu.ServeNodes = int32(nodes) _, err = ac.cClient.UpdateCluster(ctx, clu) return err } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/bigtable.go ================================================ /* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "fmt" "io" "strconv" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/cloud" btdpb "google.golang.org/cloud/bigtable/internal/data_proto" btspb "google.golang.org/cloud/bigtable/internal/service_proto" "google.golang.org/grpc" ) const prodAddr = "bigtable.googleapis.com:443" // Client is a client for reading and writing data to tables in a cluster. type Client struct { conn *grpc.ClientConn client btspb.BigtableServiceClient project, zone, cluster string } // NewClient creates a new Client for a given project, zone and cluster. func NewClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*Client, error) { o := []cloud.ClientOption{ cloud.WithEndpoint(prodAddr), cloud.WithScopes(Scope), } o = append(o, opts...) conn, err := cloud.DialGRPC(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } return &Client{ conn: conn, client: btspb.NewBigtableServiceClient(conn), project: project, zone: zone, cluster: cluster, }, nil } // Close closes the Client. func (c *Client) Close() { c.conn.Close() } func (c *Client) fullTableName(table string) string { return fmt.Sprintf("projects/%s/zones/%s/clusters/%s/tables/%s", c.project, c.zone, c.cluster, table) } // A Table refers to a table. 
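// A minimal data-client sketch (not part of the vendored file): dial the data endpoint,
// obtain a *Table handle and close the connection when done. The identifiers are
// placeholders. Open performs no RPC; it only records the table name used by later
// requests such as ReadRows and Apply (see the sketches further down).
func exampleOpen(ctx context.Context) error {
	client, err := NewClient(ctx, "my-project", "us-central1-b", "my-cluster")
	if err != nil {
		return err
	}
	defer client.Close()
	tbl := client.Open("mytable")
	_ = tbl // pass tbl to ReadRows, ReadRow, Apply, ApplyReadModifyWrite, ...
	return nil
}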
type Table struct { c *Client table string } // Open opens a table. func (c *Client) Open(table string) *Table { return &Table{ c: c, table: table, } } // TODO(dsymonds): Read method that returns a sequence of ReadItems. // ReadRows reads rows from a table. f is called for each row. // If f returns false, the stream is shut down and ReadRows returns. // f owns its argument, and f is called serially. // // By default, the yielded rows will contain all values in all cells. // Use RowFilter to limit the cells returned. func (t *Table) ReadRows(ctx context.Context, arg RowRange, f func(Row) bool, opts ...ReadOption) error { req := &btspb.ReadRowsRequest{ TableName: t.c.fullTableName(t.table), RowRange: arg.proto(), } for _, opt := range opts { opt.set(req) } ctx, cancel := context.WithCancel(ctx) // for aborting the stream stream, err := t.c.client.ReadRows(ctx, req) if err != nil { return err } cr := new(chunkReader) for { res, err := stream.Recv() if err == io.EOF { break } if err != nil { return err } if row := cr.process(res); row != nil { if !f(row) { // Cancel and drain stream. cancel() for { if _, err := stream.Recv(); err != nil { return nil } } } } } return nil } // ReadRow is a convenience implementation of a single-row reader. // A missing row will return a zero-length map and a nil error. func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { var r Row err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { r = rr return true }, opts...) return r, err } type chunkReader struct { partial map[string]Row // incomplete rows } // process handles a single btspb.ReadRowsResponse. // If it completes a row, that row is returned. func (cr *chunkReader) process(rrr *btspb.ReadRowsResponse) Row { if cr.partial == nil { cr.partial = make(map[string]Row) } row := string(rrr.RowKey) r := cr.partial[row] if r == nil { r = make(Row) cr.partial[row] = r } for _, chunk := range rrr.Chunks { if chunk.ResetRow { r = make(Row) cr.partial[row] = r continue } if chunk.CommitRow { delete(cr.partial, row) return r // assume that this is the last chunk } decodeFamilyProto(r, row, chunk.RowContents) } return nil } // decodeFamilyProto adds the cell data from f to the given row. func decodeFamilyProto(r Row, row string, f *btdpb.Family) { fam := f.Name // does not have colon for _, col := range f.Columns { for _, cell := range col.Cells { ri := ReadItem{ Row: row, Column: fmt.Sprintf("%s:%s", fam, col.Qualifier), Timestamp: Timestamp(cell.TimestampMicros), Value: cell.Value, } r[fam] = append(r[fam], ri) } } } // A RowRange is used to describe the rows to be read. // A RowRange is a half-open interval [Start, Limit) encompassing // all the rows with keys at least as large as Start, and less than Limit. // (Bigtable string comparison is the same as Go's.) // A RowRange can be unbounded, encompassing all keys at least as large as Start. type RowRange struct { start string limit string } // NewRange returns the new RowRange [begin, end). func NewRange(begin, end string) RowRange { return RowRange{ start: begin, limit: end, } } // Unbounded tests whether a RowRange is unbounded. func (r RowRange) Unbounded() bool { return r.limit == "" } // Contains says whether the RowRange contains the key. func (r RowRange) Contains(row string) bool { return r.start <= row && (r.limit == "" || r.limit > row) } // String provides a printable description of a RowRange. 
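// A minimal read sketch (not part of the vendored file): stream every row whose key is
// in ["a", "m") and print each cell. "mytable" and the range bounds are placeholders;
// only ReadRows, NewRange, Row and ReadItem from above are used.
func exampleReadRange(ctx context.Context, client *Client) error {
	tbl := client.Open("mytable")
	return tbl.ReadRows(ctx, NewRange("a", "m"), func(r Row) bool {
		for _, items := range r {
			for _, item := range items {
				// item.Column is "family:qualifier"; item.Row is the row key.
				fmt.Printf("%s %s=%q\n", item.Row, item.Column, item.Value)
			}
		}
		return true // keep streaming; return false to cancel the stream
	})
}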
func (r RowRange) String() string { a := strconv.Quote(r.start) if r.Unbounded() { return fmt.Sprintf("[%s,∞)", a) } return fmt.Sprintf("[%s,%q)", a, r.limit) } func (r RowRange) proto() *btdpb.RowRange { if r.Unbounded() { return &btdpb.RowRange{StartKey: []byte(r.start)} } return &btdpb.RowRange{ StartKey: []byte(r.start), EndKey: []byte(r.limit), } } // SingleRow returns a RowRange for reading a single row. func SingleRow(row string) RowRange { return RowRange{ start: row, limit: row + "\x00", } } // PrefixRange returns a RowRange consisting of all keys starting with the prefix. func PrefixRange(prefix string) RowRange { return RowRange{ start: prefix, limit: prefixSuccessor(prefix), } } // InfiniteRange returns the RowRange consisting of all keys at least as // large as start. func InfiniteRange(start string) RowRange { return RowRange{ start: start, limit: "", } } // prefixSuccessor returns the lexically smallest string greater than the // prefix, if it exists, or "" otherwise. In either case, it is the string // needed for the Limit of a RowRange. func prefixSuccessor(prefix string) string { if prefix == "" { return "" // infinite range } n := len(prefix) for n--; n >= 0 && prefix[n] == '\xff'; n-- { } if n == -1 { return "" } ans := []byte(prefix[:n]) ans = append(ans, prefix[n]+1) return string(ans) } // A ReadOption is an optional argument to ReadRows. type ReadOption interface { set(req *btspb.ReadRowsRequest) } // RowFilter returns a ReadOption that applies f to the contents of read rows. func RowFilter(f Filter) ReadOption { return rowFilter{f} } type rowFilter struct{ f Filter } func (rf rowFilter) set(req *btspb.ReadRowsRequest) { req.Filter = rf.f.proto() } // LimitRows returns a ReadOption that will limit the number of rows to be read. func LimitRows(limit int64) ReadOption { return limitRows{limit} } type limitRows struct{ limit int64 } func (lr limitRows) set(req *btspb.ReadRowsRequest) { req.NumRowsLimit = lr.limit } // A Row is returned by ReadRow. The map is keyed by column family (the prefix // of the column name before the colon). The values are the returned ReadItems // for that column family in the order returned by Read. type Row map[string][]ReadItem // Key returns the row's key, or "" if the row is empty. func (r Row) Key() string { for _, items := range r { if len(items) > 0 { return items[0].Row } } return "" } // A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. type ReadItem struct { Row, Column string Timestamp Timestamp Value []byte } // Apply applies a Mutation to a specific row. func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { after := func(res proto.Message) { for _, o := range opts { o.after(res) } } if m.cond == nil { req := &btspb.MutateRowRequest{ TableName: t.c.fullTableName(t.table), RowKey: []byte(row), Mutations: m.ops, } res, err := t.c.client.MutateRow(ctx, req) if err == nil { after(res) } return err } req := &btspb.CheckAndMutateRowRequest{ TableName: t.c.fullTableName(t.table), RowKey: []byte(row), PredicateFilter: m.cond.proto(), } if m.mtrue != nil { req.TrueMutations = m.mtrue.ops } if m.mfalse != nil { req.FalseMutations = m.mfalse.ops } res, err := t.c.client.CheckAndMutateRow(ctx, req) if err == nil { after(res) } return err } // An ApplyOption is an optional argument to Apply. 
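// A sketch combining the helpers above (not part of the vendored file): read at most 10
// rows whose keys start with "com.google.", keeping only the newest cell per column.
// The prefix and table are placeholders; LatestNFilter is declared in filter.go.
func examplePrefixRead(ctx context.Context, tbl *Table) error {
	return tbl.ReadRows(ctx, PrefixRange("com.google."),
		func(r Row) bool {
			fmt.Println(r.Key()) // row key, or "" if the row is empty
			return true
		},
		RowFilter(LatestNFilter(1)), // most recent cell in each column
		LimitRows(10),               // server-side limit on rows returned
	)
}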
type ApplyOption interface { after(res proto.Message) } type applyAfterFunc func(res proto.Message) func (a applyAfterFunc) after(res proto.Message) { a(res) } // GetCondMutationResult returns an ApplyOption that reports whether the conditional // mutation's condition matched. func GetCondMutationResult(matched *bool) ApplyOption { return applyAfterFunc(func(res proto.Message) { if res, ok := res.(*btspb.CheckAndMutateRowResponse); ok { *matched = res.PredicateMatched } }) } // Mutation represents a set of changes for a single row of a table. type Mutation struct { ops []*btdpb.Mutation // for conditional mutations cond Filter mtrue, mfalse *Mutation } // NewMutation returns a new mutation. func NewMutation() *Mutation { return new(Mutation) } // NewCondMutation returns a conditional mutation. // The given row filter determines which mutation is applied: // If the filter matches any cell in the row, mtrue is applied; // otherwise, mfalse is applied. // Either given mutation may be nil. func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} } // Set sets a value in a specified column, with the given timestamp. // The timestamp will be truncated to millisecond resolution. // A timestamp of ServerTime means to use the server timestamp. func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { if ts != ServerTime { // Truncate to millisecond resolution, since that's the default table config. // TODO(dsymonds): Provide a way to override this behaviour. ts -= ts % 1000 } m.ops = append(m.ops, &btdpb.Mutation{SetCell: &btdpb.Mutation_SetCell{ FamilyName: family, ColumnQualifier: []byte(column), TimestampMicros: int64(ts), Value: value, }}) } // DeleteCellsInColumn will delete all the cells whose columns are family:column. func (m *Mutation) DeleteCellsInColumn(family, column string) { m.ops = append(m.ops, &btdpb.Mutation{DeleteFromColumn: &btdpb.Mutation_DeleteFromColumn{ FamilyName: family, ColumnQualifier: []byte(column), }}) } // DeleteTimestampRange deletes all cells whose columns are family:column // and whose timestamps are in the half-open interval [start, end). // If end is zero, it will be interpreted as infinity. func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { m.ops = append(m.ops, &btdpb.Mutation{DeleteFromColumn: &btdpb.Mutation_DeleteFromColumn{ FamilyName: family, ColumnQualifier: []byte(column), TimeRange: &btdpb.TimestampRange{ StartTimestampMicros: int64(start), EndTimestampMicros: int64(end), }, }}) } // DeleteCellsInFamily will delete all the cells whose columns are family:*. func (m *Mutation) DeleteCellsInFamily(family string) { m.ops = append(m.ops, &btdpb.Mutation{DeleteFromFamily: &btdpb.Mutation_DeleteFromFamily{ FamilyName: family, }}) } // DeleteRow deletes the entire row. func (m *Mutation) DeleteRow() { m.ops = append(m.ops, &btdpb.Mutation{DeleteFromRow: &btdpb.Mutation_DeleteFromRow{}}) } // Timestamp is in units of microseconds since 1 January 1970. type Timestamp int64 // ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. // It indicates that the server's timestamp should be used. const ServerTime Timestamp = -1 // Time converts a time.Time into a Timestamp. func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } // Now returns the Timestamp representation of the current time on the client. func Now() Timestamp { return Time(time.Now()) } // Time converts a Timestamp into a time.Time. 
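// A conditional-write sketch (not part of the vendored file): set "links:golang.org"
// only when the row already has a cell in the "links" family, and report whether the
// condition matched. Family, column and value are placeholders; FamilyFilter is
// declared in filter.go, everything else is declared above.
func exampleCondWrite(ctx context.Context, tbl *Table, row string) (bool, error) {
	onMatch := NewMutation()
	onMatch.Set("links", "golang.org", Now(), []byte("1"))
	cond := NewCondMutation(FamilyFilter("links"), onMatch, nil) // nil: no-op when unmatched
	var matched bool
	err := tbl.Apply(ctx, row, cond, GetCondMutationResult(&matched))
	return matched, err
}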
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } // ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. // It returns the newly written cells. func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { req := &btspb.ReadModifyWriteRowRequest{ TableName: t.c.fullTableName(t.table), RowKey: []byte(row), Rules: m.ops, } res, err := t.c.client.ReadModifyWriteRow(ctx, req) if err != nil { return nil, err } r := make(Row) for _, fam := range res.Families { // res is *btdpb.Row, fam is *btdpb.Family decodeFamilyProto(r, row, fam) } return r, nil } // ReadModifyWrite represents a set of operations on a single row of a table. // It is like Mutation but for non-idempotent changes. // When applied, these operations operate on the latest values of the row's cells, // and result in a new value being written to the relevant cell with a timestamp // that is max(existing timestamp, current server time). // // The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will // be executed serially by the server. type ReadModifyWrite struct { ops []*btdpb.ReadModifyWriteRule } // NewReadModifyWrite returns a new ReadModifyWrite. func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } // AppendValue appends a value to a specific cell's value. // If the cell is unset, it will be treated as an empty value. func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{ FamilyName: family, ColumnQualifier: []byte(column), AppendValue: v, }) } // Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, // and adds a value to it. If the cell is unset, it will be treated as zero. // If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite // operation will fail. func (m *ReadModifyWrite) Increment(family, column string, delta int64) { m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{ FamilyName: family, ColumnQualifier: []byte(column), IncrementAmount: delta, }) } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/bttest/inmem.go ================================================ /* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package bttest contains test helpers for working with the bigtable package. To use a Server, create it, and then connect to it with no security: (The project/zone/cluster values are ignored.) srv, err := bttest.NewServer() ... client, err := bigtable.NewClient(ctx, proj, zone, cluster, bigtable.WithCredentials(nil), bigtable.WithInsecureAddr(srv.Addr)) ... 
*/ package bttest import ( "encoding/binary" "fmt" "log" "net" "regexp" "sort" "strings" "sync" "golang.org/x/net/context" btdpb "google.golang.org/cloud/bigtable/internal/data_proto" emptypb "google.golang.org/cloud/bigtable/internal/empty" btspb "google.golang.org/cloud/bigtable/internal/service_proto" bttdpb "google.golang.org/cloud/bigtable/internal/table_data_proto" bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto" "google.golang.org/grpc" ) // Server is an in-memory Cloud Bigtable fake. // It is unauthenticated, and only a rough approximation. type Server struct { Addr string l net.Listener srv *grpc.Server s *server } // server is the real implementation of the fake. // It is a separate and unexported type so the API won't be cluttered with // methods that are only relevant to the fake's implementation. type server struct { mu sync.Mutex tables map[string]*table // keyed by fully qualified name // Any unimplemented methods will cause a panic. bttspb.BigtableTableServiceServer btspb.BigtableServiceServer } // NewServer creates a new Server. The Server will be listening for gRPC connections // at the address named by the Addr field, without TLS. func NewServer() (*Server, error) { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, err } s := &Server{ Addr: l.Addr().String(), l: l, srv: grpc.NewServer(), s: &server{ tables: make(map[string]*table), }, } bttspb.RegisterBigtableTableServiceServer(s.srv, s.s) btspb.RegisterBigtableServiceServer(s.srv, s.s) go s.srv.Serve(s.l) return s, nil } // Close shuts down the server. func (s *Server) Close() { s.srv.Stop() s.l.Close() } func (s *server) CreateTable(ctx context.Context, req *bttspb.CreateTableRequest) (*bttdpb.Table, error) { tbl := req.Name + "/tables/" + req.TableId s.mu.Lock() if _, ok := s.tables[tbl]; ok { s.mu.Unlock() return nil, fmt.Errorf("table %q already exists", tbl) } s.tables[tbl] = newTable() s.mu.Unlock() return &bttdpb.Table{Name: tbl}, nil } func (s *server) ListTables(ctx context.Context, req *bttspb.ListTablesRequest) (*bttspb.ListTablesResponse, error) { res := &bttspb.ListTablesResponse{} prefix := req.Name + "/tables/" s.mu.Lock() for tbl := range s.tables { if strings.HasPrefix(tbl, prefix) { res.Tables = append(res.Tables, &bttdpb.Table{Name: tbl}) } } s.mu.Unlock() return res, nil } func (s *server) DeleteTable(ctx context.Context, req *bttspb.DeleteTableRequest) (*emptypb.Empty, error) { s.mu.Lock() defer s.mu.Unlock() if _, ok := s.tables[req.Name]; !ok { return nil, fmt.Errorf("no such table %q", req.Name) } delete(s.tables, req.Name) return &emptypb.Empty{}, nil } func (s *server) CreateColumnFamily(ctx context.Context, req *bttspb.CreateColumnFamilyRequest) (*bttdpb.ColumnFamily, error) { s.mu.Lock() tbl, ok := s.tables[req.Name] s.mu.Unlock() if !ok { return nil, fmt.Errorf("no such table %q", req.Name) } // Check it is unique and record it. 
fam := req.ColumnFamilyId tbl.mu.Lock() defer tbl.mu.Unlock() if _, ok := tbl.families[fam]; ok { return nil, fmt.Errorf("family %q already exists", fam) } tbl.families[fam] = true return &bttdpb.ColumnFamily{ Name: req.Name + "/families/" + fam, }, nil } func (s *server) ReadRows(req *btspb.ReadRowsRequest, stream btspb.BigtableService_ReadRowsServer) error { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return fmt.Errorf("no such table %q", req.TableName) } var start, end string // half-open interval if rr := req.RowRange; rr != nil { start, end = string(rr.StartKey), string(rr.EndKey) } else { // A single row read is simply an edge case. start = string(req.RowKey) end = start + "\x00" } // Get rows to stream back. tbl.mu.RLock() si, ei := 0, len(tbl.rows) // half-open interval if start != "" { si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start }) } if end != "" { ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end }) } if si >= ei { tbl.mu.RUnlock() return nil } rows := make([]*row, ei-si) copy(rows, tbl.rows[si:ei]) tbl.mu.RUnlock() for _, r := range rows { if err := streamRow(stream, r, req.Filter); err != nil { return err } } return nil } func streamRow(stream btspb.BigtableService_ReadRowsServer, r *row, f *btdpb.RowFilter) error { r.mu.Lock() defer r.mu.Unlock() rrr := &btspb.ReadRowsResponse{ RowKey: []byte(r.key), } for col, cs := range r.cells { i := strings.Index(col, ":") // guaranteed to exist fam, col := col[:i], col[i+1:] cells := filterCells(f, r, fam, col, cs) if len(cells) == 0 { continue } // TODO(dsymonds): Apply transformers. chunk := &btspb.ReadRowsResponse_Chunk{ RowContents: &btdpb.Family{ Name: fam, Columns: []*btdpb.Column{{ Qualifier: []byte(col), // Cells is populated below. }}, }, } colm := chunk.RowContents.Columns[0] for _, cell := range cells { colm.Cells = append(colm.Cells, &btdpb.Cell{ TimestampMicros: cell.ts, Value: cell.value, }) } rrr.Chunks = append(rrr.Chunks, chunk) } rrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{CommitRow: true}) return stream.Send(rrr) } func filterCells(f *btdpb.RowFilter, r *row, fam, col string, cs []cell) []cell { // Special handling for cells_per_column_limit_filter. if f != nil && f.CellsPerColumnLimitFilter > 0 { n := int(f.CellsPerColumnLimitFilter) if n > len(cs) { n = len(cs) } return cs[:n] } var ret []cell for _, cell := range cs { if includeCell(f, r, fam, col, cell) { ret = append(ret, cell) } } return ret } func includeCell(f *btdpb.RowFilter, r *row, fam, col string, cell cell) bool { if f == nil { return true } // TODO(dsymonds): Implement many more filters. 
switch { default: log.Printf("WARNING: don't know how to handle filter (ignoring it): %v", f) return true case f.Chain != nil: for _, sub := range f.Chain.Filters { if !includeCell(sub, r, fam, col, cell) { return false } } return true case len(f.ColumnQualifierRegexFilter) > 0: pat := string(f.ColumnQualifierRegexFilter) rx, err := regexp.Compile(pat) if err != nil { log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) return false } return rx.MatchString(col) case len(f.ValueRegexFilter) > 0: pat := string(f.ValueRegexFilter) rx, err := regexp.Compile(pat) if err != nil { log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) return false } return rx.Match(cell.value) } } func (s *server) MutateRow(ctx context.Context, req *btspb.MutateRowRequest) (*emptypb.Empty, error) { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return nil, fmt.Errorf("no such table %q", req.TableName) } r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() defer r.mu.Unlock() if err := applyMutations(tbl, r, req.Mutations); err != nil { return nil, err } return &emptypb.Empty{}, nil } func (s *server) CheckAndMutateRow(ctx context.Context, req *btspb.CheckAndMutateRowRequest) (*btspb.CheckAndMutateRowResponse, error) { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return nil, fmt.Errorf("no such table %q", req.TableName) } res := &btspb.CheckAndMutateRowResponse{} r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() defer r.mu.Unlock() // Figure out which mutation to apply. whichMut := false if req.PredicateFilter == nil { // Use true_mutations iff row contains any cells. whichMut = len(r.cells) > 0 } else { // Use true_mutations iff any cells in the row match the filter. for col, cs := range r.cells { i := strings.Index(col, ":") // guaranteed to exist fam, col := col[:i], col[i+1:] for _, cell := range cs { if includeCell(req.PredicateFilter, r, fam, col, cell) { whichMut = true break } } if whichMut { break } } // TODO(dsymonds): Figure out if this is supposed to be set // even when there's no predicate filter. res.PredicateMatched = whichMut } muts := req.FalseMutations if whichMut { muts = req.TrueMutations } if err := applyMutations(tbl, r, muts); err != nil { return nil, err } return res, nil } // applyMutations applies a sequence of mutations to a row. // It assumes r.mu is locked. 
func applyMutations(tbl *table, r *row, muts []*btdpb.Mutation) error { for _, mut := range muts { switch { default: return fmt.Errorf("can't handle mutation %v", mut) case mut.SetCell != nil: set := mut.SetCell tbl.mu.RLock() famOK := tbl.families[set.FamilyName] tbl.mu.RUnlock() if !famOK { return fmt.Errorf("unknown family %q", set.FamilyName) } if !tbl.validTimestamp(set.TimestampMicros) { return fmt.Errorf("invalid timestamp %d", set.TimestampMicros) } col := fmt.Sprintf("%s:%s", set.FamilyName, set.ColumnQualifier) cs := r.cells[col] newCell := cell{ts: set.TimestampMicros, value: set.Value} replaced := false for i, cell := range cs { if cell.ts == newCell.ts { cs[i] = newCell replaced = true break } } if !replaced { cs = append(cs, newCell) } sort.Sort(byDescTS(cs)) r.cells[col] = cs case mut.DeleteFromColumn != nil: del := mut.DeleteFromColumn col := fmt.Sprintf("%s:%s", del.FamilyName, del.ColumnQualifier) cs := r.cells[col] if del.TimeRange != nil { tsr := del.TimeRange if !tbl.validTimestamp(tsr.StartTimestampMicros) { return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros) } if !tbl.validTimestamp(tsr.EndTimestampMicros) { return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros) } // Find half-open interval to remove. // Cells are in descending timestamp order, // so the predicates to sort.Search are inverted. si, ei := 0, len(cs) if tsr.StartTimestampMicros > 0 { ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) } if tsr.EndTimestampMicros > 0 { si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) } if si < ei { copy(cs[si:], cs[ei:]) cs = cs[:len(cs)-(ei-si)] } } else { cs = nil } if len(cs) == 0 { delete(r.cells, col) } else { r.cells[col] = cs } } } return nil } func (s *server) ReadModifyWriteRow(ctx context.Context, req *btspb.ReadModifyWriteRowRequest) (*btdpb.Row, error) { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return nil, fmt.Errorf("no such table %q", req.TableName) } updates := make(map[string]cell) // copy of updated cells; keyed by full column name r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() defer r.mu.Unlock() // Assume all mutations apply to the most recent version of the cell. // TODO(dsymonds): Verify this assumption and document it in the proto. for _, rule := range req.Rules { key := fmt.Sprintf("%s:%s", rule.FamilyName, rule.ColumnQualifier) newCell := false if len(r.cells[key]) == 0 { r.cells[key] = []cell{{ // TODO(dsymonds): should this set a timestamp? }} newCell = true } cell := &r.cells[key][0] if len(rule.AppendValue) > 0 { cell.value = append(cell.value, rule.AppendValue...) 
} if rule.IncrementAmount != 0 { var v int64 if !newCell { if len(cell.value) != 8 { return nil, fmt.Errorf("increment on non-64-bit value") } v = int64(binary.BigEndian.Uint64(cell.value)) } v += rule.IncrementAmount var val [8]byte binary.BigEndian.PutUint64(val[:], uint64(v)) cell.value = val[:] } updates[key] = *cell } res := &btdpb.Row{ Key: req.RowKey, } for col, cell := range updates { i := strings.Index(col, ":") fam, qual := col[:i], col[i+1:] var f *btdpb.Family for _, ff := range res.Families { if ff.Name == fam { f = ff break } } if f == nil { f = &btdpb.Family{Name: fam} res.Families = append(res.Families, f) } f.Columns = append(f.Columns, &btdpb.Column{ Qualifier: []byte(qual), Cells: []*btdpb.Cell{{ Value: cell.value, }}, }) } return res, nil } type table struct { mu sync.RWMutex families map[string]bool // keyed by plain family name rows []*row // sorted by row key rowIndex map[string]*row // indexed by row key } func newTable() *table { return &table{ families: make(map[string]bool), rowIndex: make(map[string]*row), } } func (t *table) validTimestamp(ts int64) bool { // Assume millisecond granularity is required. return ts%1000 == 0 } func (t *table) mutableRow(row string) *row { // Try fast path first. t.mu.RLock() r := t.rowIndex[row] t.mu.RUnlock() if r != nil { return r } // We probably need to create the row. t.mu.Lock() r = t.rowIndex[row] if r == nil { r = newRow(row) t.rowIndex[row] = r t.rows = append(t.rows, r) sort.Sort(byRowKey(t.rows)) // yay, inefficient! } t.mu.Unlock() return r } type byRowKey []*row func (b byRowKey) Len() int { return len(b) } func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key } type row struct { key string mu sync.Mutex cells map[string][]cell // keyed by full column name; cells are in descending timestamp order } func newRow(key string) *row { return &row{ key: key, cells: make(map[string][]cell), } } type cell struct { ts int64 value []byte } type byDescTS []cell func (b byDescTS) Len() int { return len(b) } func (b byDescTS) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go ================================================ /* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main // Command docs are in cbtdoc.go. import ( "bufio" "bytes" "flag" "fmt" "go/format" "io/ioutil" "log" "os" "path/filepath" "regexp" "sort" "strconv" "strings" "text/tabwriter" "text/template" "time" "golang.org/x/net/context" "google.golang.org/cloud/bigtable" ) var ( // These get default values from $HOME/.cbtrc if it exists. 
project = flag.String("project", "", "project ID") zone = flag.String("zone", "", "CBT zone") cluster = flag.String("cluster", "", "CBT cluster") creds = flag.String("creds", "", "if set, use application credentials in this file") oFlag = flag.String("o", "", "if set, redirect stdout to this file") client *bigtable.Client adminClient *bigtable.AdminClient ) func getClient() *bigtable.Client { if client == nil { var err error client, err = bigtable.NewClient(context.Background(), *project, *zone, *cluster) if err != nil { log.Fatalf("Making bigtable.Client: %v", err) } } return client } func getAdminClient() *bigtable.AdminClient { if adminClient == nil { var err error adminClient, err = bigtable.NewAdminClient(context.Background(), *project, *zone, *cluster) if err != nil { log.Fatalf("Making bigtable.AdminClient: %v", err) } } return adminClient } func configFilename() string { // TODO(dsymonds): Might need tweaking for Windows. return filepath.Join(os.Getenv("HOME"), ".cbtrc") } func loadConfig() { filename := configFilename() data, err := ioutil.ReadFile(filename) if err != nil { // silent fail if the file isn't there if os.IsNotExist(err) { return } log.Fatalf("Reading %s: %v", filename, err) } s := bufio.NewScanner(bytes.NewReader(data)) for s.Scan() { line := s.Text() i := strings.Index(line, "=") if i < 0 { log.Fatalf("Bad line in %s: %q", filename, line) } key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) switch key { default: log.Fatalf("Unknown key in %s: %q", filename, key) case "project": *project = val case "zone": *zone = val case "cluster": *cluster = val case "creds": *creds = val } } } func main() { loadConfig() flag.Usage = usage flag.Parse() if *project == "" { log.Fatal("Missing -project") } if *zone == "" { log.Fatal("Missing -zone") } if *cluster == "" { log.Fatal("Missing -cluster") } if *creds != "" { os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", *creds) } if flag.NArg() == 0 { usage() os.Exit(1) } if *oFlag != "" { f, err := os.Create(*oFlag) if err != nil { log.Fatal(err) } defer func() { if err := f.Close(); err != nil { log.Fatal(err) } }() os.Stdout = f } ctx := context.Background() for _, cmd := range commands { if cmd.Name == flag.Arg(0) { cmd.do(ctx, flag.Args()[1:]...) return } } log.Fatalf("Unknown command %q", flag.Arg(0)) } func usage() { fmt.Fprintf(os.Stderr, "Usage: %s [flags] ...\n", os.Args[0]) flag.PrintDefaults() fmt.Fprintf(os.Stderr, "\n%s", cmdSummary) } var cmdSummary string // generated in init, below func init() { var buf bytes.Buffer tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0) for _, cmd := range commands { fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc) } tw.Flush() buf.WriteString(configHelp) cmdSummary = buf.String() } var configHelp = ` For convenience, values of the -project, -zone, -cluster and -creds flags may be specified in ` + configFilename() + ` in this format: project = my-project-123 zone = us-central1-b cluster = my-cluster creds = path-to-account-key.json All values are optional, and all will be overridden by flags. ` var commands = []struct { Name, Desc string do func(context.Context, ...string) Usage string }{ { Name: "count", Desc: "Count rows in a table", do: doCount, Usage: "cbt count ", }, { Name: "createfamily", Desc: "Create a column family", do: doCreateFamily, Usage: "cbt createfamily
", }, { Name: "createtable", Desc: "Create a table", do: doCreateTable, Usage: "cbt createtable
", }, { Name: "deletefamily", Desc: "Delete a column family", do: doDeleteFamily, Usage: "cbt deletefamily
", }, { Name: "deleterow", Desc: "Delete a row", do: doDeleteRow, Usage: "cbt deleterow
", }, { Name: "deletetable", Desc: "Delete a table", do: doDeleteTable, Usage: "cbt deletetable
", }, { Name: "doc", Desc: "Print documentation for cbt", do: doDoc, Usage: "cbt doc", }, { Name: "help", Desc: "Print help text", do: doHelp, Usage: "cbt help [command]", }, { Name: "lookup", Desc: "Read from a single row", do: doLookup, Usage: "cbt lookup
", }, { Name: "ls", Desc: "List tables and column families", do: doLS, Usage: "cbt ls List tables\n" + "cbt ls
List column families in
", }, { Name: "read", Desc: "Read rows", do: doRead, Usage: "cbt read
[start=] [limit=] [prefix=]\n" + " start= Start reading at this row\n" + " limit= Stop reading before this row\n" + " prefix= Read rows with this prefix\n", }, { Name: "set", Desc: "Set value of a cell", do: doSet, Usage: "cbt set
family:column=val[@ts] ...\n" + " family:column=val[@ts] may be repeated to set multiple cells.\n" + "\n" + " ts is an optional integer timestamp.\n" + " If it cannot be parsed, the `@ts` part will be\n" + " interpreted as part of the value.", }, { Name: "setclustersize", Desc: "Set size of a cluster", do: doSetClusterSize, Usage: "cbt setclustersize ", }, } func doCount(ctx context.Context, args ...string) { if len(args) != 1 { log.Fatal("usage: cbt count
") } tbl := getClient().Open(args[0]) n := 0 err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { n++ return true }, bigtable.RowFilter(bigtable.StripValueFilter())) if err != nil { log.Fatalf("Reading rows: %v", err) } fmt.Println(n) } func doCreateFamily(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatal("usage: cbt createfamily
") } err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) if err != nil { log.Fatalf("Creating column family: %v", err) } } func doCreateTable(ctx context.Context, args ...string) { if len(args) != 1 { log.Fatal("usage: cbt createtable
") } err := getAdminClient().CreateTable(ctx, args[0]) if err != nil { log.Fatalf("Creating table: %v", err) } } func doDeleteFamily(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatal("usage: cbt deletefamily
") } err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) if err != nil { log.Fatalf("Deleting column family: %v", err) } } func doDeleteRow(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatal("usage: cbt deleterow
") } tbl := getClient().Open(args[0]) mut := bigtable.NewMutation() mut.DeleteRow() if err := tbl.Apply(ctx, args[1], mut); err != nil { log.Fatalf("Deleting row: %v", err) } } func doDeleteTable(ctx context.Context, args ...string) { if len(args) != 1 { log.Fatalf("Can't do `cbt deletetable %s`", args) } err := getAdminClient().DeleteTable(ctx, args[0]) if err != nil { log.Fatalf("Deleting table: %v", err) } } // to break circular dependencies var ( doDocFn func(ctx context.Context, args ...string) doHelpFn func(ctx context.Context, args ...string) ) func init() { doDocFn = doDocReal doHelpFn = doHelpReal } func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } func doDocReal(ctx context.Context, args ...string) { data := map[string]interface{}{ "Commands": commands, } var buf bytes.Buffer if err := docTemplate.Execute(&buf, data); err != nil { log.Fatalf("Bad doc template: %v", err) } out, err := format.Source(buf.Bytes()) if err != nil { log.Fatalf("Bad doc output: %v", err) } os.Stdout.Write(out) } var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ "indent": func(s, ind string) string { ss := strings.Split(s, "\n") for i, p := range ss { ss[i] = ind + p } return strings.Join(ss, "\n") }, }). Parse(` // DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. // Run "go generate" to regenerate. //go:generate go run cbt.go -o cbtdoc.go doc /* Cbt is a tool for doing basic interactions with Cloud Bigtable. Usage: cbt [options] command [arguments] The commands are: {{range .Commands}} {{printf "%-25s %s" .Name .Desc}}{{end}} Use "cbt help " for more information about a command. {{range .Commands}} {{.Desc}} Usage: {{indent .Usage "\t"}} {{end}} */ package main `)) func doHelpReal(ctx context.Context, args ...string) { if len(args) == 0 { fmt.Print(cmdSummary) return } for _, cmd := range commands { if cmd.Name == args[0] { fmt.Println(cmd.Usage) return } } log.Fatalf("Don't know command %q", args[0]) } func doLookup(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatalf("usage: cbt lookup
") } table, row := args[0], args[1] tbl := getClient().Open(table) r, err := tbl.ReadRow(ctx, row) if err != nil { log.Fatalf("Reading row: %v", err) } printRow(r) } func printRow(r bigtable.Row) { fmt.Println(strings.Repeat("-", 40)) fmt.Println(r.Key()) var fams []string for fam := range r { fams = append(fams, fam) } sort.Strings(fams) for _, fam := range fams { ris := r[fam] sort.Sort(byColumn(ris)) for _, ri := range ris { ts := time.Unix(0, int64(ri.Timestamp)*1e3) fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) fmt.Printf(" %q\n", ri.Value) } } } type byColumn []bigtable.ReadItem func (b byColumn) Len() int { return len(b) } func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } func doLS(ctx context.Context, args ...string) { switch len(args) { default: log.Fatalf("Can't do `cbt ls %s`", args) case 0: tables, err := getAdminClient().Tables(ctx) if err != nil { log.Fatalf("Getting list of tables: %v", err) } sort.Strings(tables) for _, table := range tables { fmt.Println(table) } case 1: table := args[0] ti, err := getAdminClient().TableInfo(ctx, table) if err != nil { log.Fatalf("Getting table info: %v", err) } sort.Strings(ti.Families) for _, fam := range ti.Families { fmt.Println(fam) } } } func doRead(ctx context.Context, args ...string) { if len(args) < 1 { log.Fatalf("usage: cbt read
[args ...]") } tbl := getClient().Open(args[0]) parsed := make(map[string]string) for _, arg := range args[1:] { i := strings.Index(arg, "=") if i < 0 { log.Fatalf("Bad arg %q", arg) } key, val := arg[:i], arg[i+1:] switch key { default: log.Fatalf("Unknown arg key %q", key) case "start", "limit", "prefix": parsed[key] = val } } if (parsed["start"] != "" || parsed["limit"] != "") && parsed["prefix"] != "" { log.Fatal(`"start"/"limit" may not be mixed with "prefix"`) } var rr bigtable.RowRange if start, limit := parsed["start"], parsed["limit"]; limit != "" { rr = bigtable.NewRange(start, limit) } else if start != "" { rr = bigtable.InfiniteRange(start) } if prefix := parsed["prefix"]; prefix != "" { rr = bigtable.PrefixRange(prefix) } // TODO(dsymonds): Support filters. err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool { printRow(r) return true }) if err != nil { log.Fatalf("Reading rows: %v", err) } } var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`) func doSet(ctx context.Context, args ...string) { if len(args) < 3 { log.Fatalf("usage: cbt set
family:[column]=val[@ts] ...") } tbl := getClient().Open(args[0]) row := args[1] mut := bigtable.NewMutation() for _, arg := range args[2:] { m := setArg.FindStringSubmatch(arg) if m == nil { log.Fatalf("Bad set arg %q", arg) } val := m[3] ts := bigtable.Now() if i := strings.LastIndex(val, "@"); i >= 0 { // Try parsing a timestamp. n, err := strconv.ParseInt(val[i+1:], 0, 64) if err == nil { val = val[:i] ts = bigtable.Timestamp(n) } } mut.Set(m[1], m[2], ts, []byte(val)) } if err := tbl.Apply(ctx, row, mut); err != nil { log.Fatalf("Applying mutation: %v", err) } } func doSetClusterSize(ctx context.Context, args ...string) { if len(args) != 1 { log.Fatalf("usage: cbt setclustersize ") } n, err := strconv.ParseInt(args[0], 0, 32) if err != nil { log.Fatalf("Bad num_nodes value %q: %v", args[0], err) } if err := getAdminClient().SetClusterSize(ctx, int(n)); err != nil { log.Fatalf("Setting cluster size: %v", err) } } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go ================================================ // DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. // Run "go generate" to regenerate. //go:generate go run cbt.go -o cbtdoc.go doc /* Cbt is a tool for doing basic interactions with Cloud Bigtable. Usage: cbt [options] command [arguments] The commands are: count Count rows in a table createfamily Create a column family createtable Create a table deletefamily Delete a column family deleterow Delete a row deletetable Delete a table doc Print documentation for cbt help Print help text lookup Read from a single row ls List tables and column families read Read rows set Set value of a cell setclustersize Set size of a cluster Use "cbt help " for more information about a command. Count rows in a table Usage: cbt count
<table> Create a column family Usage: cbt createfamily <table> <family>
Create a table Usage: cbt createtable <table>
Delete a column family Usage: cbt deletefamily <table> <family>
Delete a row Usage: cbt deleterow <table> <row>
Delete a table Usage: cbt deletetable <table>
Print documentation for cbt Usage: cbt doc Print help text Usage: cbt help [command] Read from a single row Usage: cbt lookup <table> <row>
List tables and column families Usage: cbt ls List tables cbt ls <table> List column families in <table>
Read rows Usage: cbt read <table> [start=<row>] [limit=<row>] [prefix=<prefix>] start=<row> Start reading at this row limit=<row> Stop reading before this row prefix=<prefix> Read rows with this prefix Set value of a cell Usage: cbt set <table> <row>
family:column=val[@ts] ... family:column=val[@ts] may be repeated to set multiple cells. ts is an optional integer timestamp. If it cannot be parsed, the `@ts` part will be interpreted as part of the value. Set size of a cluster Usage: cbt setclustersize */ package main ================================================ FILE: vendor/google.golang.org/cloud/bigtable/doc.go ================================================ /* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package bigtable is an API to Google Cloud Bigtable. See https://cloud.google.com/bigtable/docs/ for general product documentation. Setup and Credentials Use NewClient or NewAdminClient to create a client that can be used to access the data or admin APIs respectively. Both require credentials that have permission to access the Cloud Bigtable API. If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials (https://developers.google.com/accounts/docs/application-default-credentials) is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. To use alternate credentials, pass them to NewClient or NewAdminClient using cloud.WithTokenSource. For instance, you can use service account credentials by visiting https://cloud.google.com/console/project/MYPROJECT/apiui/credential, creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing jsonKey, err := ioutil.ReadFile(pathToKeyFile) ... config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. ... client, err := bigtable.NewClient(ctx, project, zone, cluster, cloud.WithTokenSource(config.TokenSource())) ... Here, `google` means the golang.org/x/oauth2/google package and `cloud` means the google.golang.org/cloud package. Reading The principal way to read from a Bigtable is to use the ReadRows method on *Table. A RowRange specifies a contiguous portion of a table. A Filter may be provided through RowFilter to limit or transform the data that is returned. tbl := client.Open("mytable") ... // Read all the rows starting with "com.google.", // but only fetch the columns in the "links" family. rr := bigtable.PrefixRange("com.google.") err := tbl.ReadRows(ctx, rr, func(r Row) bool { // do something with r return true // keep going }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) ... To read a single row, use the ReadRow helper method. r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key ... Writing This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. The former expresses idempotent operations. The latter expresses non-idempotent operations and returns the new values of updated cells. These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite methods on a Table. 
For instance, to set a couple of cells in a table, tbl := client.Open("mytable") mut := bigtable.NewMutation() mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) err := tbl.Apply(ctx, "com.google.cloud", mut) ... To increment an encoded value in one cell, tbl := client.Open("mytable") rmw := bigtable.NewReadModifyWrite() rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) ... */ package bigtable // Scope constants for authentication credentials. // These should be used when using credential creation functions such as credentials.NewServiceAccountFromFile. const ( // Scope is the OAuth scope for Cloud Bigtable data operations. Scope = "https://www.googleapis.com/auth/bigtable.data" // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" // AdminScope is the OAuth scope for Cloud Bigtable admin operations. AdminScope = "https://www.googleapis.com/auth/bigtable.admin" ) ================================================ FILE: vendor/google.golang.org/cloud/bigtable/filter.go ================================================ /* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "fmt" "strings" btdpb "google.golang.org/cloud/bigtable/internal/data_proto" ) // A Filter represents a row filter. type Filter interface { String() string proto() *btdpb.RowFilter } // ChainFilters returns a filter that applies a sequence of filters. func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } type chainFilter struct { sub []Filter } func (cf chainFilter) String() string { var ss []string for _, sf := range cf.sub { ss = append(ss, sf.String()) } return "(" + strings.Join(ss, " | ") + ")" } func (cf chainFilter) proto() *btdpb.RowFilter { f := &btdpb.RowFilter{ Chain: &btdpb.RowFilter_Chain{}, } for _, sf := range cf.sub { f.Chain.Filters = append(f.Chain.Filters, sf.proto()) } return f } // InterleaveFilters returns a filter that applies a set of filters in parallel // and interleaves the results. func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } type interleaveFilter struct { sub []Filter } func (ilf interleaveFilter) String() string { var ss []string for _, sf := range ilf.sub { ss = append(ss, sf.String()) } return "(" + strings.Join(ss, " + ") + ")" } func (ilf interleaveFilter) proto() *btdpb.RowFilter { f := &btdpb.RowFilter{ Interleave: &btdpb.RowFilter_Interleave{}, } for _, sf := range ilf.sub { f.Interleave.Filters = append(f.Interleave.Filters, sf.proto()) } return f } // RowKeyFilter returns a filter that matches cells from rows whose // key matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. 
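// A composition sketch (not part of the vendored file): keep only cells in the "links"
// family whose qualifier starts with "maps", at most the two newest per column. The
// patterns are placeholders; the resulting Filter is passed to ReadRows via RowFilter
// (bigtable.go). ChainFilters applies its sub-filters in sequence.
func exampleChain() Filter {
	f := ChainFilters(
		FamilyFilter("links"), // family name must match the RE2 pattern "links"
		ColumnFilter("^maps"), // column qualifier must match "^maps"
		LatestNFilter(2),      // at most the two most recent cells per column
	)
	fmt.Println(f) // prints (col(links:) | col(.*:^maps) | col(*,2))
	return f
}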
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } type rowKeyFilter string func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } func (rkf rowKeyFilter) proto() *btdpb.RowFilter { return &btdpb.RowFilter{RowKeyRegexFilter: []byte(rkf)} } // FamilyFilter returns a filter that matches cells whose family name // matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } type familyFilter string func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } func (ff familyFilter) proto() *btdpb.RowFilter { return &btdpb.RowFilter{FamilyNameRegexFilter: string(ff)} } // ColumnFilter returns a filter that matches cells whose column name // matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } type columnFilter string func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } func (cf columnFilter) proto() *btdpb.RowFilter { return &btdpb.RowFilter{ColumnQualifierRegexFilter: []byte(cf)} } // ValueFilter returns a filter that matches cells whose value // matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. func ValueFilter(pattern string) Filter { return valueFilter(pattern) } type valueFilter string func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } func (vf valueFilter) proto() *btdpb.RowFilter { return &btdpb.RowFilter{ValueRegexFilter: []byte(vf)} } // LatestNFilter returns a filter that matches the most recent N cells in each column. func LatestNFilter(n int) Filter { return latestNFilter(n) } type latestNFilter int32 func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } func (lnf latestNFilter) proto() *btdpb.RowFilter { return &btdpb.RowFilter{CellsPerColumnLimitFilter: int32(lnf)} } // StripValueFilter returns a filter that replaces each value with the empty string. func StripValueFilter() Filter { return stripValueFilter{} } type stripValueFilter struct{} func (stripValueFilter) String() string { return "strip_value()" } func (stripValueFilter) proto() *btdpb.RowFilter { return &btdpb.RowFilter{StripValueTransformer: true} } // TODO(dsymonds): More filters: cond, col/ts/value range, sampling ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto // DO NOT EDIT! /* Package google_bigtable_admin_cluster_v1 is a generated protocol buffer package. It is generated from these files: google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto It has these top-level messages: Zone Cluster */ package google_bigtable_admin_cluster_v1 import proto "github.com/golang/protobuf/proto" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal // Possible states of a zone. type Zone_Status int32 const ( // The state of the zone is unknown or unspecified. Zone_UNKNOWN Zone_Status = 0 // The zone is in a good state. Zone_OK Zone_Status = 1 // The zone is down for planned maintenance. 
Zone_PLANNED_MAINTENANCE Zone_Status = 2 // The zone is down for emergency or unplanned maintenance. Zone_EMERGENCY_MAINENANCE Zone_Status = 3 ) var Zone_Status_name = map[int32]string{ 0: "UNKNOWN", 1: "OK", 2: "PLANNED_MAINTENANCE", 3: "EMERGENCY_MAINENANCE", } var Zone_Status_value = map[string]int32{ "UNKNOWN": 0, "OK": 1, "PLANNED_MAINTENANCE": 2, "EMERGENCY_MAINENANCE": 3, } func (x Zone_Status) String() string { return proto.EnumName(Zone_Status_name, int32(x)) } // A physical location in which a particular project can allocate Cloud BigTable // resources. type Zone struct { // A permanent unique identifier for the zone. // Values are of the form projects//zones/[a-z][-a-z0-9]* Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The name of this zone as it appears in UIs. DisplayName string `protobuf:"bytes,2,opt,name=display_name" json:"display_name,omitempty"` // The current state of this zone. Status Zone_Status `protobuf:"varint,3,opt,name=status,enum=google.bigtable.admin.cluster.v1.Zone_Status" json:"status,omitempty"` } func (m *Zone) Reset() { *m = Zone{} } func (m *Zone) String() string { return proto.CompactTextString(m) } func (*Zone) ProtoMessage() {} // An isolated set of Cloud BigTable resources on which tables can be hosted. type Cluster struct { // A permanent unique identifier for the cluster. For technical reasons, the // zone in which the cluster resides is included here. // Values are of the form // projects//zones//clusters/[a-z][-a-z0-9]* Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The descriptive name for this cluster as it appears in UIs. // Must be unique per zone. DisplayName string `protobuf:"bytes,4,opt,name=display_name" json:"display_name,omitempty"` // The number of serve nodes allocated to this cluster. ServeNodes int32 `protobuf:"varint,5,opt,name=serve_nodes" json:"serve_nodes,omitempty"` // The maximum HDD storage usage allowed in this cluster, in bytes. HddBytes int64 `protobuf:"varint,6,opt,name=hdd_bytes" json:"hdd_bytes,omitempty"` // The maximum SSD storage usage allowed in this cluster, in bytes. SsdBytes int64 `protobuf:"varint,7,opt,name=ssd_bytes" json:"ssd_bytes,omitempty"` } func (m *Cluster) Reset() { *m = Cluster{} } func (m *Cluster) String() string { return proto.CompactTextString(m) } func (*Cluster) ProtoMessage() {} func init() { proto.RegisterEnum("google.bigtable.admin.cluster.v1.Zone_Status", Zone_Status_name, Zone_Status_value) } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; package google.bigtable.admin.cluster.v1; option java_multiple_files = true; option java_outer_classname = "BigtableClusterDataProto"; option java_package = "com.google.bigtable.admin.cluster.v1"; // A physical location in which a particular project can allocate Cloud BigTable // resources. message Zone { // Possible states of a zone. enum Status { // The state of the zone is unknown or unspecified. UNKNOWN = 0; // The zone is in a good state. OK = 1; // The zone is down for planned maintenance. PLANNED_MAINTENANCE = 2; // The zone is down for emergency or unplanned maintenance. EMERGENCY_MAINENANCE = 3; } // A permanent unique identifier for the zone. // Values are of the form projects//zones/[a-z][-a-z0-9]* string name = 1; // The name of this zone as it appears in UIs. string display_name = 2; // The current state of this zone. Status status = 3; } // An isolated set of Cloud BigTable resources on which tables can be hosted. message Cluster { // A permanent unique identifier for the cluster. For technical reasons, the // zone in which the cluster resides is included here. // Values are of the form // projects//zones//clusters/[a-z][-a-z0-9]* string name = 1; // If this cluster has been deleted, the time at which its backup will // be irrevocably destroyed. Omitted otherwise. // This cannot be set directly, only through DeleteCluster. // The operation currently running on the cluster, if any. // This cannot be set directly, only through CreateCluster, UpdateCluster, // or UndeleteCluster. Calls to these methods will be rejected if // "current_operation" is already set. // The descriptive name for this cluster as it appears in UIs. // Must be unique per zone. string display_name = 4; // The number of serve nodes allocated to this cluster. int32 serve_nodes = 5; // The maximum HDD storage usage allowed in this cluster, in bytes. int64 hdd_bytes = 6; // The maximum SSD storage usage allowed in this cluster, in bytes. int64 ssd_bytes = 7; } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto // DO NOT EDIT! package google_bigtable_admin_cluster_v1 import proto "github.com/golang/protobuf/proto" import google_bigtable_admin_cluster_v11 "google.golang.org/cloud/bigtable/internal/cluster_data_proto" import google_protobuf "google.golang.org/cloud/bigtable/internal/empty" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal func init() { } // Client API for BigtableClusterService service type BigtableClusterServiceClient interface { // Lists the supported zones for the given project. ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) // Gets information about a particular cluster. GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) // Lists all clusters in the given project, along with any zones for which // cluster information could not be retrieved. 
ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) // Creates a cluster and begins preparing it to begin serving. The returned // cluster embeds as its "current_operation" a long-running operation which // can be used to track the progress of turning up the new cluster. // Immediately upon completion of this request: // * The cluster will be readable via the API, with all requested attributes // but no allocated resources. // Until completion of the embedded operation: // * Cancelling the operation will render the cluster immediately unreadable // via the API. // * All other attempts to modify or delete the cluster will be rejected. // Upon completion of the embedded operation: // * Billing for all successfully-allocated resources will begin (some types // may have lower than the requested levels). // * New tables can be created in the cluster. // * The cluster's allocated resource levels will be readable via the API. // The embedded operation's "metadata" field type is // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) // Updates a cluster, and begins allocating or releasing resources as // requested. The returned cluster embeds as its "current_operation" a // long-running operation which can be used to track the progress of updating // the cluster. // Immediately upon completion of this request: // * For resource types where a decrease in the cluster's allocation has been // requested, billing will be based on the newly-requested level. // Until completion of the embedded operation: // * Cancelling the operation will set its metadata's "cancelled_at_time", // and begin restoring resources to their pre-request values. The operation // is guaranteed to succeed at undoing all resource changes, after which // point it will terminate with a CANCELLED status. // * All other attempts to modify or delete the cluster will be rejected. // * Reading the cluster via the API will continue to give the pre-request // resource levels. // Upon completion of the embedded operation: // * Billing will begin for all successfully-allocated resources (some types // may have lower than the requested levels). // * All newly-reserved resources will be available for serving the cluster's // tables. // * The cluster's new resource levels will be readable via the API. // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. UpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) // Marks a cluster and all of its tables for permanent deletion in 7 days. // Immediately upon completion of the request: // * Billing will cease for all of the cluster's reserved resources. // * The cluster's "delete_time" field will be set 7 days in the future. // Soon afterward: // * All tables within the cluster will become unavailable. // Prior to the cluster's "delete_time": // * The cluster can be recovered with a call to UndeleteCluster. // * All other attempts to modify or delete the cluster will be rejected. 
// At the cluster's "delete_time": // * The cluster and *all of its tables* will immediately and irrevocably // disappear from the API, and their data will be permanently deleted. DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) } type bigtableClusterServiceClient struct { cc *grpc.ClientConn } func NewBigtableClusterServiceClient(cc *grpc.ClientConn) BigtableClusterServiceClient { return &bigtableClusterServiceClient{cc} } func (c *bigtableClusterServiceClient) ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) { out := new(ListZonesResponse) err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableClusterServiceClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { out := new(google_bigtable_admin_cluster_v11.Cluster) err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableClusterServiceClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { out := new(ListClustersResponse) err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableClusterServiceClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { out := new(google_bigtable_admin_cluster_v11.Cluster) err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableClusterServiceClient) UpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { out := new(google_bigtable_admin_cluster_v11.Cluster) err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableClusterServiceClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { out := new(google_protobuf.Empty) err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for BigtableClusterService service type BigtableClusterServiceServer interface { // Lists the supported zones for the given project. ListZones(context.Context, *ListZonesRequest) (*ListZonesResponse, error) // Gets information about a particular cluster. GetCluster(context.Context, *GetClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error) // Lists all clusters in the given project, along with any zones for which // cluster information could not be retrieved. ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) // Creates a cluster and begins preparing it to begin serving. 
The returned // cluster embeds as its "current_operation" a long-running operation which // can be used to track the progress of turning up the new cluster. // Immediately upon completion of this request: // * The cluster will be readable via the API, with all requested attributes // but no allocated resources. // Until completion of the embedded operation: // * Cancelling the operation will render the cluster immediately unreadable // via the API. // * All other attempts to modify or delete the cluster will be rejected. // Upon completion of the embedded operation: // * Billing for all successfully-allocated resources will begin (some types // may have lower than the requested levels). // * New tables can be created in the cluster. // * The cluster's allocated resource levels will be readable via the API. // The embedded operation's "metadata" field type is // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. CreateCluster(context.Context, *CreateClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error) // Updates a cluster, and begins allocating or releasing resources as // requested. The returned cluster embeds as its "current_operation" a // long-running operation which can be used to track the progress of updating // the cluster. // Immediately upon completion of this request: // * For resource types where a decrease in the cluster's allocation has been // requested, billing will be based on the newly-requested level. // Until completion of the embedded operation: // * Cancelling the operation will set its metadata's "cancelled_at_time", // and begin restoring resources to their pre-request values. The operation // is guaranteed to succeed at undoing all resource changes, after which // point it will terminate with a CANCELLED status. // * All other attempts to modify or delete the cluster will be rejected. // * Reading the cluster via the API will continue to give the pre-request // resource levels. // Upon completion of the embedded operation: // * Billing will begin for all successfully-allocated resources (some types // may have lower than the requested levels). // * All newly-reserved resources will be available for serving the cluster's // tables. // * The cluster's new resource levels will be readable via the API. // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. UpdateCluster(context.Context, *google_bigtable_admin_cluster_v11.Cluster) (*google_bigtable_admin_cluster_v11.Cluster, error) // Marks a cluster and all of its tables for permanent deletion in 7 days. // Immediately upon completion of the request: // * Billing will cease for all of the cluster's reserved resources. // * The cluster's "delete_time" field will be set 7 days in the future. // Soon afterward: // * All tables within the cluster will become unavailable. // Prior to the cluster's "delete_time": // * The cluster can be recovered with a call to UndeleteCluster. // * All other attempts to modify or delete the cluster will be rejected. // At the cluster's "delete_time": // * The cluster and *all of its tables* will immediately and irrevocably // disappear from the API, and their data will be permanently deleted. 
DeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf.Empty, error) } func RegisterBigtableClusterServiceServer(s *grpc.Server, srv BigtableClusterServiceServer) { s.RegisterService(&_BigtableClusterService_serviceDesc, srv) } func _BigtableClusterService_ListZones_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(ListZonesRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableClusterServiceServer).ListZones(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableClusterService_GetCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(GetClusterRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableClusterServiceServer).GetCluster(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableClusterService_ListClusters_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(ListClustersRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableClusterServiceServer).ListClusters(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableClusterService_CreateCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(CreateClusterRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableClusterServiceServer).CreateCluster(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableClusterService_UpdateCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(google_bigtable_admin_cluster_v11.Cluster) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableClusterServiceServer).UpdateCluster(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableClusterService_DeleteCluster_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(DeleteClusterRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableClusterServiceServer).DeleteCluster(ctx, in) if err != nil { return nil, err } return out, nil } var _BigtableClusterService_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.bigtable.admin.cluster.v1.BigtableClusterService", HandlerType: (*BigtableClusterServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "ListZones", Handler: _BigtableClusterService_ListZones_Handler, }, { MethodName: "GetCluster", Handler: _BigtableClusterService_GetCluster_Handler, }, { MethodName: "ListClusters", Handler: _BigtableClusterService_ListClusters_Handler, }, { MethodName: "CreateCluster", Handler: _BigtableClusterService_CreateCluster_Handler, }, { MethodName: "UpdateCluster", Handler: _BigtableClusterService_UpdateCluster_Handler, }, { MethodName: "DeleteCluster", Handler: _BigtableClusterService_DeleteCluster_Handler, }, }, Streams: []grpc.StreamDesc{}, } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto ================================================ // Copyright (c) 2015, Google Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.admin.cluster.v1; import "google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto"; import "google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto"; import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; option java_multiple_files = true; option java_outer_classname = "BigtableClusterServicesProto"; option java_package = "com.google.bigtable.admin.cluster.v1"; // Service for managing zonal Cloud Bigtable resources. service BigtableClusterService { // Lists the supported zones for the given project. rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { } // Gets information about a particular cluster. rpc GetCluster(GetClusterRequest) returns (Cluster) { } // Lists all clusters in the given project, along with any zones for which // cluster information could not be retrieved. rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { } // Creates a cluster and begins preparing it to begin serving. The returned // cluster embeds as its "current_operation" a long-running operation which // can be used to track the progress of turning up the new cluster. // Immediately upon completion of this request: // * The cluster will be readable via the API, with all requested attributes // but no allocated resources. // Until completion of the embedded operation: // * Cancelling the operation will render the cluster immediately unreadable // via the API. // * All other attempts to modify or delete the cluster will be rejected. // Upon completion of the embedded operation: // * Billing for all successfully-allocated resources will begin (some types // may have lower than the requested levels). // * New tables can be created in the cluster. // * The cluster's allocated resource levels will be readable via the API. // The embedded operation's "metadata" field type is // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. rpc CreateCluster(CreateClusterRequest) returns (Cluster) { } // Updates a cluster, and begins allocating or releasing resources as // requested. The returned cluster embeds as its "current_operation" a // long-running operation which can be used to track the progress of updating // the cluster. // Immediately upon completion of this request: // * For resource types where a decrease in the cluster's allocation has been // requested, billing will be based on the newly-requested level. // Until completion of the embedded operation: // * Cancelling the operation will set its metadata's "cancelled_at_time", // and begin restoring resources to their pre-request values. The operation // is guaranteed to succeed at undoing all resource changes, after which // point it will terminate with a CANCELLED status. 
// * All other attempts to modify or delete the cluster will be rejected. // * Reading the cluster via the API will continue to give the pre-request // resource levels. // Upon completion of the embedded operation: // * Billing will begin for all successfully-allocated resources (some types // may have lower than the requested levels). // * All newly-reserved resources will be available for serving the cluster's // tables. // * The cluster's new resource levels will be readable via the API. // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. rpc UpdateCluster(Cluster) returns (Cluster) { } // Marks a cluster and all of its tables for permanent deletion in 7 days. // Immediately upon completion of the request: // * Billing will cease for all of the cluster's reserved resources. // * The cluster's "delete_time" field will be set 7 days in the future. // Soon afterward: // * All tables within the cluster will become unavailable. // Prior to the cluster's "delete_time": // * The cluster can be recovered with a call to UndeleteCluster. // * All other attempts to modify or delete the cluster will be rejected. // At the cluster's "delete_time": // * The cluster and *all of its tables* will immediately and irrevocably // disappear from the API, and their data will be permanently deleted. rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { } // Cancels the scheduled deletion of an cluster and begins preparing it to // resume serving. The returned operation will also be embedded as the // cluster's "current_operation". // Immediately upon completion of this request: // * The cluster's "delete_time" field will be unset, protecting it from // automatic deletion. // Until completion of the returned operation: // * The operation cannot be cancelled. // Upon completion of the returned operation: // * Billing for the cluster's resources will resume. // * All tables within the cluster will be available. // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto // DO NOT EDIT! /* Package google_bigtable_admin_cluster_v1 is a generated protocol buffer package. It is generated from these files: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto It has these top-level messages: ListZonesRequest ListZonesResponse GetClusterRequest ListClustersRequest ListClustersResponse CreateClusterRequest CreateClusterMetadata UpdateClusterMetadata DeleteClusterRequest UndeleteClusterRequest UndeleteClusterMetadata */ package google_bigtable_admin_cluster_v1 import proto "github.com/golang/protobuf/proto" import google_bigtable_admin_cluster_v11 "google.golang.org/cloud/bigtable/internal/cluster_data_proto" // Reference imports to suppress errors if they are not otherwise used. 
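// exampleCreateClusterRequest is an illustrative sketch, not part of the
// generated file: it shows how the request messages defined below embed the
// Cluster message from the cluster data proto. The project, zone, cluster id
// and display name are hypothetical values.
func exampleCreateClusterRequest() *CreateClusterRequest {
	return &CreateClusterRequest{
		// The zone in which the new cluster should live.
		Name:      "projects/my-project/zones/us-central1-b",
		ClusterId: "test-cluster",
		Cluster: &google_bigtable_admin_cluster_v11.Cluster{
			DisplayName: "Test cluster",
			ServeNodes:  3,
		},
	}
}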
var _ = proto.Marshal // Request message for BigtableClusterService.ListZones. type ListZonesRequest struct { // The unique name of the project for which a list of supported zones is // requested. // Values are of the form projects/ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *ListZonesRequest) Reset() { *m = ListZonesRequest{} } func (m *ListZonesRequest) String() string { return proto.CompactTextString(m) } func (*ListZonesRequest) ProtoMessage() {} // Response message for BigtableClusterService.ListZones. type ListZonesResponse struct { // The list of requested zones. Zones []*google_bigtable_admin_cluster_v11.Zone `protobuf:"bytes,1,rep,name=zones" json:"zones,omitempty"` } func (m *ListZonesResponse) Reset() { *m = ListZonesResponse{} } func (m *ListZonesResponse) String() string { return proto.CompactTextString(m) } func (*ListZonesResponse) ProtoMessage() {} func (m *ListZonesResponse) GetZones() []*google_bigtable_admin_cluster_v11.Zone { if m != nil { return m.Zones } return nil } // Request message for BigtableClusterService.GetCluster. type GetClusterRequest struct { // The unique name of the requested cluster. // Values are of the form projects//zones//clusters/ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } func (*GetClusterRequest) ProtoMessage() {} // Request message for BigtableClusterService.ListClusters. type ListClustersRequest struct { // The unique name of the project for which a list of clusters is requested. // Values are of the form projects/ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } func (*ListClustersRequest) ProtoMessage() {} // Response message for BigtableClusterService.ListClusters. type ListClustersResponse struct { // The list of requested Clusters. Clusters []*google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` // The zones for which clusters could not be retrieved. FailedZones []*google_bigtable_admin_cluster_v11.Zone `protobuf:"bytes,2,rep,name=failed_zones" json:"failed_zones,omitempty"` } func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } func (*ListClustersResponse) ProtoMessage() {} func (m *ListClustersResponse) GetClusters() []*google_bigtable_admin_cluster_v11.Cluster { if m != nil { return m.Clusters } return nil } func (m *ListClustersResponse) GetFailedZones() []*google_bigtable_admin_cluster_v11.Zone { if m != nil { return m.FailedZones } return nil } // Request message for BigtableClusterService.CreateCluster. type CreateClusterRequest struct { // The unique name of the zone in which to create the cluster. // Values are of the form projects//zones/ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The id to be used when referring to the new cluster within its zone, // e.g. just the "test-cluster" section of the full name // "projects//zones//clusters/test-cluster". ClusterId string `protobuf:"bytes,2,opt,name=cluster_id" json:"cluster_id,omitempty"` // The cluster to create. // The "name", "delete_time", and "current_operation" fields must be left // blank. 
Cluster *google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` } func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } func (*CreateClusterRequest) ProtoMessage() {} func (m *CreateClusterRequest) GetCluster() *google_bigtable_admin_cluster_v11.Cluster { if m != nil { return m.Cluster } return nil } // Metadata type for the operation returned by // BigtableClusterService.CreateCluster. type CreateClusterMetadata struct { // The request which prompted the creation of this operation. OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request" json:"original_request,omitempty"` } func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } func (*CreateClusterMetadata) ProtoMessage() {} func (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest { if m != nil { return m.OriginalRequest } return nil } // Metadata type for the operation returned by // BigtableClusterService.UpdateCluster. type UpdateClusterMetadata struct { // The request which prompted the creation of this operation. OriginalRequest *google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,1,opt,name=original_request" json:"original_request,omitempty"` } func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } func (*UpdateClusterMetadata) ProtoMessage() {} func (m *UpdateClusterMetadata) GetOriginalRequest() *google_bigtable_admin_cluster_v11.Cluster { if m != nil { return m.OriginalRequest } return nil } // Request message for BigtableClusterService.DeleteCluster. type DeleteClusterRequest struct { // The unique name of the cluster to be deleted. // Values are of the form projects//zones//clusters/ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } func (*DeleteClusterRequest) ProtoMessage() {} // Request message for BigtableClusterService.UndeleteCluster. type UndeleteClusterRequest struct { // The unique name of the cluster to be un-deleted. // Values are of the form projects//zones//clusters/ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *UndeleteClusterRequest) Reset() { *m = UndeleteClusterRequest{} } func (m *UndeleteClusterRequest) String() string { return proto.CompactTextString(m) } func (*UndeleteClusterRequest) ProtoMessage() {} // Metadata type for the operation returned by // BigtableClusterService.UndeleteCluster. type UndeleteClusterMetadata struct { } func (m *UndeleteClusterMetadata) Reset() { *m = UndeleteClusterMetadata{} } func (m *UndeleteClusterMetadata) String() string { return proto.CompactTextString(m) } func (*UndeleteClusterMetadata) ProtoMessage() {} func init() { } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.admin.cluster.v1; import "google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto"; option java_multiple_files = true; option java_outer_classname = "BigtableClusterServiceMessagesProto"; option java_package = "com.google.bigtable.admin.cluster.v1"; // Request message for BigtableClusterService.ListZones. message ListZonesRequest { // The unique name of the project for which a list of supported zones is // requested. // Values are of the form projects/ string name = 1; } // Response message for BigtableClusterService.ListZones. message ListZonesResponse { // The list of requested zones. repeated Zone zones = 1; } // Request message for BigtableClusterService.GetCluster. message GetClusterRequest { // The unique name of the requested cluster. // Values are of the form projects//zones//clusters/ string name = 1; } // Request message for BigtableClusterService.ListClusters. message ListClustersRequest { // The unique name of the project for which a list of clusters is requested. // Values are of the form projects/ string name = 1; } // Response message for BigtableClusterService.ListClusters. message ListClustersResponse { // The list of requested Clusters. repeated Cluster clusters = 1; // The zones for which clusters could not be retrieved. repeated Zone failed_zones = 2; } // Request message for BigtableClusterService.CreateCluster. message CreateClusterRequest { // The unique name of the zone in which to create the cluster. // Values are of the form projects//zones/ string name = 1; // The id to be used when referring to the new cluster within its zone, // e.g. just the "test-cluster" section of the full name // "projects//zones//clusters/test-cluster". string cluster_id = 2; // The cluster to create. // The "name", "delete_time", and "current_operation" fields must be left // blank. Cluster cluster = 3; } // Metadata type for the operation returned by // BigtableClusterService.CreateCluster. message CreateClusterMetadata { // The request which prompted the creation of this operation. CreateClusterRequest original_request = 1; // The time at which original_request was received. // The time at which this operation failed or was completed successfully. } // Metadata type for the operation returned by // BigtableClusterService.UpdateCluster. message UpdateClusterMetadata { // The request which prompted the creation of this operation. Cluster original_request = 1; // The time at which original_request was received. // The time at which this operation was cancelled. If set, this operation is // in the process of undoing itself (which is guaranteed to succeed) and // cannot be cancelled again. // The time at which this operation failed or was completed successfully. } // Request message for BigtableClusterService.DeleteCluster. message DeleteClusterRequest { // The unique name of the cluster to be deleted. // Values are of the form projects//zones//clusters/ string name = 1; } // Request message for BigtableClusterService.UndeleteCluster. 
message UndeleteClusterRequest { // The unique name of the cluster to be un-deleted. // Values are of the form projects//zones//clusters/ string name = 1; } // Metadata type for the operation returned by // BigtableClusterService.UndeleteCluster. message UndeleteClusterMetadata { // The time at which the original request was received. // The time at which this operation failed or was completed successfully. } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto // DO NOT EDIT! /* Package google_bigtable_v1 is a generated protocol buffer package. It is generated from these files: google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto It has these top-level messages: Row Family Column Cell RowRange ColumnRange TimestampRange ValueRange RowFilter Mutation ReadModifyWriteRule */ package google_bigtable_v1 import proto "github.com/golang/protobuf/proto" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal // Specifies the complete (requested) contents of a single row of a table. // Rows which exceed 256MiB in size cannot be read in full. type Row struct { // The unique key which identifies this row within its table. This is the same // key that's used to identify the row in, for example, a MutateRowRequest. // May contain any non-empty byte string up to 16KiB in length. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // May be empty, but only if the entire row is empty. // The mutual ordering of column families is not specified. Families []*Family `protobuf:"bytes,2,rep,name=families" json:"families,omitempty"` } func (m *Row) Reset() { *m = Row{} } func (m *Row) String() string { return proto.CompactTextString(m) } func (*Row) ProtoMessage() {} func (m *Row) GetFamilies() []*Family { if m != nil { return m.Families } return nil } // Specifies (some of) the contents of a single row/column family of a table. type Family struct { // The unique key which identifies this family within its row. This is the // same key that's used to identify the family in, for example, a RowFilter // which sets its "family_name_regex_filter" field. // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may // produce cells in a sentinel family with an empty name. // Must be no greater than 64 characters in length. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Must not be empty. Sorted in order of increasing "qualifier". Columns []*Column `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"` } func (m *Family) Reset() { *m = Family{} } func (m *Family) String() string { return proto.CompactTextString(m) } func (*Family) ProtoMessage() {} func (m *Family) GetColumns() []*Column { if m != nil { return m.Columns } return nil } // Specifies (some of) the contents of a single row/column of a table. type Column struct { // The unique key which identifies this column within its family. This is the // same key that's used to identify the column in, for example, a RowFilter // which sets its "column_qualifier_regex_filter" field. // May contain any byte string, including the empty string, up to 16kiB in // length. Qualifier []byte `protobuf:"bytes,1,opt,name=qualifier,proto3" json:"qualifier,omitempty"` // Must not be empty. 
Sorted in order of decreasing "timestamp_micros". Cells []*Cell `protobuf:"bytes,2,rep,name=cells" json:"cells,omitempty"` } func (m *Column) Reset() { *m = Column{} } func (m *Column) String() string { return proto.CompactTextString(m) } func (*Column) ProtoMessage() {} func (m *Column) GetCells() []*Cell { if m != nil { return m.Cells } return nil } // Specifies (some of) the contents of a single row/column/timestamp of a table. type Cell struct { // The cell's stored timestamp, which also uniquely identifies it within // its column. // Values are always expressed in microseconds, but individual tables may set // a coarser "granularity" to further restrict the allowed values. For // example, a table which specifies millisecond granularity will only allow // values of "timestamp_micros" which are multiples of 1000. TimestampMicros int64 `protobuf:"varint,1,opt,name=timestamp_micros" json:"timestamp_micros,omitempty"` // The value stored in the cell. // May contain any byte string, including the empty string, up to 100MiB in // length. Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (m *Cell) Reset() { *m = Cell{} } func (m *Cell) String() string { return proto.CompactTextString(m) } func (*Cell) ProtoMessage() {} // Specifies a contiguous range of rows. type RowRange struct { // Inclusive lower bound. If left empty, interpreted as the empty string. StartKey []byte `protobuf:"bytes,2,opt,name=start_key,proto3" json:"start_key,omitempty"` // Exclusive upper bound. If left empty, interpreted as infinity. EndKey []byte `protobuf:"bytes,3,opt,name=end_key,proto3" json:"end_key,omitempty"` } func (m *RowRange) Reset() { *m = RowRange{} } func (m *RowRange) String() string { return proto.CompactTextString(m) } func (*RowRange) ProtoMessage() {} // Specifies a contiguous range of columns within a single column family. // The range spans from : to // :, where both bounds can be either inclusive or // exclusive. type ColumnRange struct { // The name of the column family within which this range falls. FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` // Used when giving an inclusive lower bound for the range. StartQualifierInclusive []byte `protobuf:"bytes,2,opt,name=start_qualifier_inclusive,proto3" json:"start_qualifier_inclusive,omitempty"` // Used when giving an exclusive lower bound for the range. StartQualifierExclusive []byte `protobuf:"bytes,3,opt,name=start_qualifier_exclusive,proto3" json:"start_qualifier_exclusive,omitempty"` // Used when giving an inclusive upper bound for the range. EndQualifierInclusive []byte `protobuf:"bytes,4,opt,name=end_qualifier_inclusive,proto3" json:"end_qualifier_inclusive,omitempty"` // Used when giving an exclusive upper bound for the range. EndQualifierExclusive []byte `protobuf:"bytes,5,opt,name=end_qualifier_exclusive,proto3" json:"end_qualifier_exclusive,omitempty"` } func (m *ColumnRange) Reset() { *m = ColumnRange{} } func (m *ColumnRange) String() string { return proto.CompactTextString(m) } func (*ColumnRange) ProtoMessage() {} // Specified a contiguous range of microsecond timestamps. type TimestampRange struct { // Inclusive lower bound. If left empty, interpreted as 0. StartTimestampMicros int64 `protobuf:"varint,1,opt,name=start_timestamp_micros" json:"start_timestamp_micros,omitempty"` // Exclusive upper bound. If left empty, interpreted as infinity. 
EndTimestampMicros int64 `protobuf:"varint,2,opt,name=end_timestamp_micros" json:"end_timestamp_micros,omitempty"` } func (m *TimestampRange) Reset() { *m = TimestampRange{} } func (m *TimestampRange) String() string { return proto.CompactTextString(m) } func (*TimestampRange) ProtoMessage() {} // Specifies a contiguous range of raw byte values. type ValueRange struct { // Used when giving an inclusive lower bound for the range. StartValueInclusive []byte `protobuf:"bytes,1,opt,name=start_value_inclusive,proto3" json:"start_value_inclusive,omitempty"` // Used when giving an exclusive lower bound for the range. StartValueExclusive []byte `protobuf:"bytes,2,opt,name=start_value_exclusive,proto3" json:"start_value_exclusive,omitempty"` // Used when giving an inclusive upper bound for the range. EndValueInclusive []byte `protobuf:"bytes,3,opt,name=end_value_inclusive,proto3" json:"end_value_inclusive,omitempty"` // Used when giving an exclusive upper bound for the range. EndValueExclusive []byte `protobuf:"bytes,4,opt,name=end_value_exclusive,proto3" json:"end_value_exclusive,omitempty"` } func (m *ValueRange) Reset() { *m = ValueRange{} } func (m *ValueRange) String() string { return proto.CompactTextString(m) } func (*ValueRange) ProtoMessage() {} // Takes a row as input and produces an alternate view of the row based on // specified rules. For example, a RowFilter might trim down a row to include // just the cells from columns matching a given regular expression, or might // return all the cells of a row but not their values. More complicated filters // can be composed out of these components to express requests such as, "within // every column of a particular family, give just the two most recent cells // which are older than timestamp X." // // There are two broad categories of RowFilters (true filters and transformers), // as well as two ways to compose simple filters into more complex ones // (chains and interleaves). They work as follows: // // * True filters alter the input row by excluding some of its cells wholesale // from the output row. An example of a true filter is the "value_regex_filter", // which excludes cells whose values don't match the specified pattern. All // regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) // in raw byte mode (RE2::Latin1), and are evaluated as full matches. An // important point to keep in mind is that RE2(.) is equivalent by default to // RE2([^\n]), meaning that it does not match newlines. When attempting to match // an arbitrary byte, you should therefore use the escape sequence '\C', which // may need to be further escaped as '\\C' in your client language. // // * Transformers alter the input row by changing the values of some of its // cells in the output, without excluding them completely. Currently, the only // supported transformer is the "strip_value_transformer", which replaces every // cell's value with the empty string. // // * Chains and interleaves are described in more detail in the // RowFilter.Chain and RowFilter.Interleave documentation. // // The total serialized size of a RowFilter message must not // exceed 4096 bytes, and RowFilters may not be nested within each other // (in Chains or Interleaves) to a depth of more than 20. type RowFilter struct { // Applies several RowFilters to the data in sequence, progressively // narrowing the results. 
Chain *RowFilter_Chain `protobuf:"bytes,1,opt,name=chain" json:"chain,omitempty"` // Applies several RowFilters to the data in parallel and combines the // results. Interleave *RowFilter_Interleave `protobuf:"bytes,2,opt,name=interleave" json:"interleave,omitempty"` // Applies one of two possible RowFilters to the data based on the output of // a predicate RowFilter. Condition *RowFilter_Condition `protobuf:"bytes,3,opt,name=condition" json:"condition,omitempty"` // Matches only cells from rows whose keys satisfy the given RE2 regex. In // other words, passes through the entire row when the key matches, and // otherwise produces an empty row. // Note that, since row keys can contain arbitrary bytes, the '\C' escape // sequence must be used if a true wildcard is desired. The '.' character // will not match the new line character '\n', which may be present in a // binary key. RowKeyRegexFilter []byte `protobuf:"bytes,4,opt,name=row_key_regex_filter,proto3" json:"row_key_regex_filter,omitempty"` // Matches all cells from a row with probability p, and matches no cells // from the row with probability 1-p. RowSampleFilter float64 `protobuf:"fixed64,14,opt,name=row_sample_filter" json:"row_sample_filter,omitempty"` // Matches only cells from columns whose families satisfy the given RE2 // regex. For technical reasons, the regex must not contain the ':' // character, even if it is not being used as a literal. // Note that, since column families cannot contain the new line character // '\n', it is sufficient to use '.' as a full wildcard when matching // column family names. FamilyNameRegexFilter string `protobuf:"bytes,5,opt,name=family_name_regex_filter" json:"family_name_regex_filter,omitempty"` // Matches only cells from columns whose qualifiers satisfy the given RE2 // regex. // Note that, since column qualifiers can contain arbitrary bytes, the '\C' // escape sequence must be used if a true wildcard is desired. The '.' // character will not match the new line character '\n', which may be // present in a binary qualifier. ColumnQualifierRegexFilter []byte `protobuf:"bytes,6,opt,name=column_qualifier_regex_filter,proto3" json:"column_qualifier_regex_filter,omitempty"` // Matches only cells from columns within the given range. ColumnRangeFilter *ColumnRange `protobuf:"bytes,7,opt,name=column_range_filter" json:"column_range_filter,omitempty"` // Matches only cells with timestamps within the given range. TimestampRangeFilter *TimestampRange `protobuf:"bytes,8,opt,name=timestamp_range_filter" json:"timestamp_range_filter,omitempty"` // Matches only cells with values that satisfy the given regular expression. // Note that, since cell values can contain arbitrary bytes, the '\C' escape // sequence must be used if a true wildcard is desired. The '.' character // will not match the new line character '\n', which may be present in a // binary value. ValueRegexFilter []byte `protobuf:"bytes,9,opt,name=value_regex_filter,proto3" json:"value_regex_filter,omitempty"` // Matches only cells with values that fall within the given range. ValueRangeFilter *ValueRange `protobuf:"bytes,15,opt,name=value_range_filter" json:"value_range_filter,omitempty"` // Skips the first N cells of each row, matching all subsequent cells. CellsPerRowOffsetFilter int32 `protobuf:"varint,10,opt,name=cells_per_row_offset_filter" json:"cells_per_row_offset_filter,omitempty"` // Matches only the first N cells of each row. 
CellsPerRowLimitFilter int32 `protobuf:"varint,11,opt,name=cells_per_row_limit_filter" json:"cells_per_row_limit_filter,omitempty"` // Matches only the most recent N cells within each column. For example, // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, // skip all earlier cells in "foo:bar", and then begin matching again in // column "foo:bar2". CellsPerColumnLimitFilter int32 `protobuf:"varint,12,opt,name=cells_per_column_limit_filter" json:"cells_per_column_limit_filter,omitempty"` // Replaces each cell's value with the empty string. StripValueTransformer bool `protobuf:"varint,13,opt,name=strip_value_transformer" json:"strip_value_transformer,omitempty"` } func (m *RowFilter) Reset() { *m = RowFilter{} } func (m *RowFilter) String() string { return proto.CompactTextString(m) } func (*RowFilter) ProtoMessage() {} func (m *RowFilter) GetChain() *RowFilter_Chain { if m != nil { return m.Chain } return nil } func (m *RowFilter) GetInterleave() *RowFilter_Interleave { if m != nil { return m.Interleave } return nil } func (m *RowFilter) GetCondition() *RowFilter_Condition { if m != nil { return m.Condition } return nil } func (m *RowFilter) GetColumnRangeFilter() *ColumnRange { if m != nil { return m.ColumnRangeFilter } return nil } func (m *RowFilter) GetTimestampRangeFilter() *TimestampRange { if m != nil { return m.TimestampRangeFilter } return nil } func (m *RowFilter) GetValueRangeFilter() *ValueRange { if m != nil { return m.ValueRangeFilter } return nil } // A RowFilter which sends rows through several RowFilters in sequence. type RowFilter_Chain struct { // The elements of "filters" are chained together to process the input row: // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row // The full chain is executed atomically. Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` } func (m *RowFilter_Chain) Reset() { *m = RowFilter_Chain{} } func (m *RowFilter_Chain) String() string { return proto.CompactTextString(m) } func (*RowFilter_Chain) ProtoMessage() {} func (m *RowFilter_Chain) GetFilters() []*RowFilter { if m != nil { return m.Filters } return nil } // A RowFilter which sends each row to each of several component // RowFilters and interleaves the results. type RowFilter_Interleave struct { // The elements of "filters" all process a copy of the input row, and the // results are pooled, sorted, and combined into a single output row. // If multiple cells are produced with the same column and timestamp, // they will all appear in the output row in an unspecified mutual order. // Consider the following example, with three filters: // // input row // | // ----------------------------------------------------- // | | | // f(0) f(1) f(2) // | | | // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a // 2: foo,blah,11,z far,blah,5,x far,blah,5,x // | | | // ----------------------------------------------------- // | // 1: foo,bar,10,z // could have switched with #2 // 2: foo,bar,10,x // could have switched with #1 // 3: foo,blah,11,z // 4: far,bar,7,a // 5: far,blah,5,x // identical to #6 // 6: far,blah,5,x // identical to #5 // All interleaved filters are executed atomically. 
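// Illustrative sketch, not part of the generated file: composing the messages
// above, a filter meaning "from family fam, keep only the most recent cell in
// each column" can be written as a two-element chain ("fam" is a hypothetical
// family name):
//
//	f := &RowFilter{
//		Chain: &RowFilter_Chain{
//			Filters: []*RowFilter{
//				{FamilyNameRegexFilter: "fam"}, // true filter: drop other families
//				{CellsPerColumnLimitFilter: 1}, // keep the newest cell per column
//			},
//		},
//	}
//	_ = f
//
// As noted above, regex filters are RE2 full matches in raw byte mode, so '.'
// does not match '\n' and the '\C' escape is needed to match an arbitrary byte.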
Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` } func (m *RowFilter_Interleave) Reset() { *m = RowFilter_Interleave{} } func (m *RowFilter_Interleave) String() string { return proto.CompactTextString(m) } func (*RowFilter_Interleave) ProtoMessage() {} func (m *RowFilter_Interleave) GetFilters() []*RowFilter { if m != nil { return m.Filters } return nil } // A RowFilter which evaluates one of two possible RowFilters, depending on // whether or not a predicate RowFilter outputs any cells from the input row. // // IMPORTANT NOTE: The predicate filter does not execute atomically with the // true and false filters, which may lead to inconsistent or unexpected // results. Additionally, Condition filters have poor performance, especially // when filters are set for the false condition. type RowFilter_Condition struct { // If "predicate_filter" outputs any cells, then "true_filter" will be // evaluated on the input row. Otherwise, "false_filter" will be evaluated. PredicateFilter *RowFilter `protobuf:"bytes,1,opt,name=predicate_filter" json:"predicate_filter,omitempty"` // The filter to apply to the input row if "predicate_filter" returns any // results. If not provided, no results will be returned in the true case. TrueFilter *RowFilter `protobuf:"bytes,2,opt,name=true_filter" json:"true_filter,omitempty"` // The filter to apply to the input row if "predicate_filter" does not // return any results. If not provided, no results will be returned in the // false case. FalseFilter *RowFilter `protobuf:"bytes,3,opt,name=false_filter" json:"false_filter,omitempty"` } func (m *RowFilter_Condition) Reset() { *m = RowFilter_Condition{} } func (m *RowFilter_Condition) String() string { return proto.CompactTextString(m) } func (*RowFilter_Condition) ProtoMessage() {} func (m *RowFilter_Condition) GetPredicateFilter() *RowFilter { if m != nil { return m.PredicateFilter } return nil } func (m *RowFilter_Condition) GetTrueFilter() *RowFilter { if m != nil { return m.TrueFilter } return nil } func (m *RowFilter_Condition) GetFalseFilter() *RowFilter { if m != nil { return m.FalseFilter } return nil } // Specifies a particular change to be made to the contents of a row. type Mutation struct { // Set a cell's value. SetCell *Mutation_SetCell `protobuf:"bytes,1,opt,name=set_cell" json:"set_cell,omitempty"` // Deletes cells from a column. DeleteFromColumn *Mutation_DeleteFromColumn `protobuf:"bytes,2,opt,name=delete_from_column" json:"delete_from_column,omitempty"` // Deletes cells from a column family. DeleteFromFamily *Mutation_DeleteFromFamily `protobuf:"bytes,3,opt,name=delete_from_family" json:"delete_from_family,omitempty"` // Deletes cells from the entire row. DeleteFromRow *Mutation_DeleteFromRow `protobuf:"bytes,4,opt,name=delete_from_row" json:"delete_from_row,omitempty"` } func (m *Mutation) Reset() { *m = Mutation{} } func (m *Mutation) String() string { return proto.CompactTextString(m) } func (*Mutation) ProtoMessage() {} func (m *Mutation) GetSetCell() *Mutation_SetCell { if m != nil { return m.SetCell } return nil } func (m *Mutation) GetDeleteFromColumn() *Mutation_DeleteFromColumn { if m != nil { return m.DeleteFromColumn } return nil } func (m *Mutation) GetDeleteFromFamily() *Mutation_DeleteFromFamily { if m != nil { return m.DeleteFromFamily } return nil } func (m *Mutation) GetDeleteFromRow() *Mutation_DeleteFromRow { if m != nil { return m.DeleteFromRow } return nil } // A Mutation which sets the value of the specified cell. 
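// Illustrative sketch, not part of the generated file: a row mutation that
// writes a single cell populates only the SetCell field, using the SetCell
// message defined just below ("fam" and "col" are hypothetical names; -1 asks
// the server to assign the current Bigtable server time):
//
//	m := &Mutation{
//		SetCell: &Mutation_SetCell{
//			FamilyName:      "fam",
//			ColumnQualifier: []byte("col"),
//			TimestampMicros: -1,
//			Value:           []byte("v"),
//		},
//	}
//	_ = m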
type Mutation_SetCell struct { // The name of the family into which new data should be written. // Must match [-_.a-zA-Z0-9]+ FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` // The qualifier of the column into which new data should be written. // Can be any byte string, including the empty string. ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` // The timestamp of the cell into which new data should be written. // Use -1 for current Bigtable server time. // Otherwise, the client should set this value itself, noting that the // default value is a timestamp of zero if the field is left unspecified. // Values must match the "granularity" of the table (e.g. micros, millis). TimestampMicros int64 `protobuf:"varint,3,opt,name=timestamp_micros" json:"timestamp_micros,omitempty"` // The value to be written into the specified cell. Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` } func (m *Mutation_SetCell) Reset() { *m = Mutation_SetCell{} } func (m *Mutation_SetCell) String() string { return proto.CompactTextString(m) } func (*Mutation_SetCell) ProtoMessage() {} // A Mutation which deletes cells from the specified column, optionally // restricting the deletions to a given timestamp range. type Mutation_DeleteFromColumn struct { // The name of the family from which cells should be deleted. // Must match [-_.a-zA-Z0-9]+ FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` // The qualifier of the column from which cells should be deleted. // Can be any byte string, including the empty string. ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` // The range of timestamps within which cells should be deleted. TimeRange *TimestampRange `protobuf:"bytes,3,opt,name=time_range" json:"time_range,omitempty"` } func (m *Mutation_DeleteFromColumn) Reset() { *m = Mutation_DeleteFromColumn{} } func (m *Mutation_DeleteFromColumn) String() string { return proto.CompactTextString(m) } func (*Mutation_DeleteFromColumn) ProtoMessage() {} func (m *Mutation_DeleteFromColumn) GetTimeRange() *TimestampRange { if m != nil { return m.TimeRange } return nil } // A Mutation which deletes all cells from the specified column family. type Mutation_DeleteFromFamily struct { // The name of the family from which cells should be deleted. // Must match [-_.a-zA-Z0-9]+ FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` } func (m *Mutation_DeleteFromFamily) Reset() { *m = Mutation_DeleteFromFamily{} } func (m *Mutation_DeleteFromFamily) String() string { return proto.CompactTextString(m) } func (*Mutation_DeleteFromFamily) ProtoMessage() {} // A Mutation which deletes all cells from the containing row. type Mutation_DeleteFromRow struct { } func (m *Mutation_DeleteFromRow) Reset() { *m = Mutation_DeleteFromRow{} } func (m *Mutation_DeleteFromRow) String() string { return proto.CompactTextString(m) } func (*Mutation_DeleteFromRow) ProtoMessage() {} // Specifies an atomic read/modify/write operation on the latest value of the // specified column. type ReadModifyWriteRule struct { // The name of the family to which the read/modify/write should be applied. // Must match [-_.a-zA-Z0-9]+ FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` // The qualifier of the column to which the read/modify/write should be // applied. 
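// Illustrative sketch, not part of the generated file: the two kinds of rule
// carried by ReadModifyWriteRule. An increment requires the existing cell to
// hold an 8-byte big-endian signed integer ("fam", "log" and "counter" are
// hypothetical names):
//
//	appendRule := &ReadModifyWriteRule{
//		FamilyName:      "fam",
//		ColumnQualifier: []byte("log"),
//		AppendValue:     []byte("+entry"),
//	}
//	incrRule := &ReadModifyWriteRule{
//		FamilyName:      "fam",
//		ColumnQualifier: []byte("counter"),
//		IncrementAmount: 1, // added to the 64-bit big-endian value
//	}
//	_, _ = appendRule, incrRule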
// Can be any byte string, including the empty string. ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` // Rule specifying that "append_value" be appended to the existing value. // If the targeted cell is unset, it will be treated as containing the // empty string. AppendValue []byte `protobuf:"bytes,3,opt,name=append_value,proto3" json:"append_value,omitempty"` // Rule specifying that "increment_amount" be added to the existing value. // If the targeted cell is unset, it will be treated as containing a zero. // Otherwise, the targeted cell must contain an 8-byte value (interpreted // as a 64-bit big-endian signed integer), or the entire request will fail. IncrementAmount int64 `protobuf:"varint,4,opt,name=increment_amount" json:"increment_amount,omitempty"` } func (m *ReadModifyWriteRule) Reset() { *m = ReadModifyWriteRule{} } func (m *ReadModifyWriteRule) String() string { return proto.CompactTextString(m) } func (*ReadModifyWriteRule) ProtoMessage() {} func init() { } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.v1; option java_multiple_files = true; option java_outer_classname = "BigtableDataProto"; option java_package = "com.google.bigtable.v1"; // Specifies the complete (requested) contents of a single row of a table. // Rows which exceed 256MiB in size cannot be read in full. message Row { // The unique key which identifies this row within its table. This is the same // key that's used to identify the row in, for example, a MutateRowRequest. // May contain any non-empty byte string up to 16KiB in length. bytes key = 1; // May be empty, but only if the entire row is empty. // The mutual ordering of column families is not specified. repeated Family families = 2; } // Specifies (some of) the contents of a single row/column family of a table. message Family { // The unique key which identifies this family within its row. This is the // same key that's used to identify the family in, for example, a RowFilter // which sets its "family_name_regex_filter" field. // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may // produce cells in a sentinel family with an empty name. // Must be no greater than 64 characters in length. string name = 1; // Must not be empty. Sorted in order of increasing "qualifier". repeated Column columns = 2; } // Specifies (some of) the contents of a single row/column of a table. message Column { // The unique key which identifies this column within its family. This is the // same key that's used to identify the column in, for example, a RowFilter // which sets its "column_qualifier_regex_filter" field. // May contain any byte string, including the empty string, up to 16kiB in // length. bytes qualifier = 1; // Must not be empty. 
Sorted in order of decreasing "timestamp_micros". repeated Cell cells = 2; } // Specifies (some of) the contents of a single row/column/timestamp of a table. message Cell { // The cell's stored timestamp, which also uniquely identifies it within // its column. // Values are always expressed in microseconds, but individual tables may set // a coarser "granularity" to further restrict the allowed values. For // example, a table which specifies millisecond granularity will only allow // values of "timestamp_micros" which are multiples of 1000. int64 timestamp_micros = 1; // The value stored in the cell. // May contain any byte string, including the empty string, up to 100MiB in // length. bytes value = 2; } // Specifies a contiguous range of rows. message RowRange { // Inclusive lower bound. If left empty, interpreted as the empty string. bytes start_key = 2; // Exclusive upper bound. If left empty, interpreted as infinity. bytes end_key = 3; } // Specifies a contiguous range of columns within a single column family. // The range spans from <family_name>:<start_qualifier> to // <family_name>:<end_qualifier>, where both bounds can be either inclusive or // exclusive. message ColumnRange { // The name of the column family within which this range falls. string family_name = 1; oneof start_qualifier { // Used when giving an inclusive lower bound for the range. bytes start_qualifier_inclusive = 2; // Used when giving an exclusive lower bound for the range. bytes start_qualifier_exclusive = 3; } oneof end_qualifier { // Used when giving an inclusive upper bound for the range. bytes end_qualifier_inclusive = 4; // Used when giving an exclusive upper bound for the range. bytes end_qualifier_exclusive = 5; } } // Specifies a contiguous range of microsecond timestamps. message TimestampRange { // Inclusive lower bound. If left empty, interpreted as 0. int64 start_timestamp_micros = 1; // Exclusive upper bound. If left empty, interpreted as infinity. int64 end_timestamp_micros = 2; } // Specifies a contiguous range of raw byte values. message ValueRange { oneof start_value { // Used when giving an inclusive lower bound for the range. bytes start_value_inclusive = 1; // Used when giving an exclusive lower bound for the range. bytes start_value_exclusive = 2; } oneof end_value { // Used when giving an inclusive upper bound for the range. bytes end_value_inclusive = 3; // Used when giving an exclusive upper bound for the range. bytes end_value_exclusive = 4; } } // Takes a row as input and produces an alternate view of the row based on // specified rules. For example, a RowFilter might trim down a row to include // just the cells from columns matching a given regular expression, or might // return all the cells of a row but not their values. More complicated filters // can be composed out of these components to express requests such as, "within // every column of a particular family, give just the two most recent cells // which are older than timestamp X." // // There are two broad categories of RowFilters (true filters and transformers), // as well as two ways to compose simple filters into more complex ones // (chains and interleaves). They work as follows: // // * True filters alter the input row by excluding some of its cells wholesale // from the output row. An example of a true filter is the "value_regex_filter", // which excludes cells whose values don't match the specified pattern. All // regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) // in raw byte mode (RE2::Latin1), and are evaluated as full matches.
An // important point to keep in mind is that RE2(.) is equivalent by default to // RE2([^\n]), meaning that it does not match newlines. When attempting to match // an arbitrary byte, you should therefore use the escape sequence '\C', which // may need to be further escaped as '\\C' in your client language. // // * Transformers alter the input row by changing the values of some of its // cells in the output, without excluding them completely. Currently, the only // supported transformer is the "strip_value_transformer", which replaces every // cell's value with the empty string. // // * Chains and interleaves are described in more detail in the // RowFilter.Chain and RowFilter.Interleave documentation. // // The total serialized size of a RowFilter message must not // exceed 4096 bytes, and RowFilters may not be nested within each other // (in Chains or Interleaves) to a depth of more than 20. message RowFilter { // A RowFilter which sends rows through several RowFilters in sequence. message Chain { // The elements of "filters" are chained together to process the input row: // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row // The full chain is executed atomically. repeated RowFilter filters = 1; } // A RowFilter which sends each row to each of several component // RowFilters and interleaves the results. message Interleave { // The elements of "filters" all process a copy of the input row, and the // results are pooled, sorted, and combined into a single output row. // If multiple cells are produced with the same column and timestamp, // they will all appear in the output row in an unspecified mutual order. // Consider the following example, with three filters: // // input row // | // ----------------------------------------------------- // | | | // f(0) f(1) f(2) // | | | // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a // 2: foo,blah,11,z far,blah,5,x far,blah,5,x // | | | // ----------------------------------------------------- // | // 1: foo,bar,10,z // could have switched with #2 // 2: foo,bar,10,x // could have switched with #1 // 3: foo,blah,11,z // 4: far,bar,7,a // 5: far,blah,5,x // identical to #6 // 6: far,blah,5,x // identical to #5 // All interleaved filters are executed atomically. repeated RowFilter filters = 1; } // A RowFilter which evaluates one of two possible RowFilters, depending on // whether or not a predicate RowFilter outputs any cells from the input row. // // IMPORTANT NOTE: The predicate filter does not execute atomically with the // true and false filters, which may lead to inconsistent or unexpected // results. Additionally, Condition filters have poor performance, especially // when filters are set for the false condition. message Condition { // If "predicate_filter" outputs any cells, then "true_filter" will be // evaluated on the input row. Otherwise, "false_filter" will be evaluated. RowFilter predicate_filter = 1; // The filter to apply to the input row if "predicate_filter" returns any // results. If not provided, no results will be returned in the true case. RowFilter true_filter = 2; // The filter to apply to the input row if "predicate_filter" does not // return any results. If not provided, no results will be returned in the // false case. RowFilter false_filter = 3; } oneof filter { // Applies several RowFilters to the data in sequence, progressively // narrowing the results. Chain chain = 1; // Applies several RowFilters to the data in parallel and combines the // results. 
Interleave interleave = 2; // Applies one of two possible RowFilters to the data based on the output of // a predicate RowFilter. Condition condition = 3; // Matches only cells from rows whose keys satisfy the given RE2 regex. In // other words, passes through the entire row when the key matches, and // otherwise produces an empty row. // Note that, since row keys can contain arbitrary bytes, the '\C' escape // sequence must be used if a true wildcard is desired. The '.' character // will not match the new line character '\n', which may be present in a // binary key. bytes row_key_regex_filter = 4; // Matches all cells from a row with probability p, and matches no cells // from the row with probability 1-p. double row_sample_filter = 14; // Matches only cells from columns whose families satisfy the given RE2 // regex. For technical reasons, the regex must not contain the ':' // character, even if it is not being used as a literal. // Note that, since column families cannot contain the new line character // '\n', it is sufficient to use '.' as a full wildcard when matching // column family names. string family_name_regex_filter = 5; // Matches only cells from columns whose qualifiers satisfy the given RE2 // regex. // Note that, since column qualifiers can contain arbitrary bytes, the '\C' // escape sequence must be used if a true wildcard is desired. The '.' // character will not match the new line character '\n', which may be // present in a binary qualifier. bytes column_qualifier_regex_filter = 6; // Matches only cells from columns within the given range. ColumnRange column_range_filter = 7; // Matches only cells with timestamps within the given range. TimestampRange timestamp_range_filter = 8; // Matches only cells with values that satisfy the given regular expression. // Note that, since cell values can contain arbitrary bytes, the '\C' escape // sequence must be used if a true wildcard is desired. The '.' character // will not match the new line character '\n', which may be present in a // binary value. bytes value_regex_filter = 9; // Matches only cells with values that fall within the given range. ValueRange value_range_filter = 15; // Skips the first N cells of each row, matching all subsequent cells. int32 cells_per_row_offset_filter = 10; // Matches only the first N cells of each row. int32 cells_per_row_limit_filter = 11; // Matches only the most recent N cells within each column. For example, // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, // skip all earlier cells in "foo:bar", and then begin matching again in // column "foo:bar2". int32 cells_per_column_limit_filter = 12; // Replaces each cell's value with the empty string. bool strip_value_transformer = 13; } } // Specifies a particular change to be made to the contents of a row. message Mutation { // A Mutation which sets the value of the specified cell. message SetCell { // The name of the family into which new data should be written. // Must match [-_.a-zA-Z0-9]+ string family_name = 1; // The qualifier of the column into which new data should be written. // Can be any byte string, including the empty string. bytes column_qualifier = 2; // The timestamp of the cell into which new data should be written. // Use -1 for current Bigtable server time. // Otherwise, the client should set this value itself, noting that the // default value is a timestamp of zero if the field is left unspecified. // Values must match the "granularity" of the table (e.g. micros, millis). 
int64 timestamp_micros = 3; // The value to be written into the specified cell. bytes value = 4; } // A Mutation which deletes cells from the specified column, optionally // restricting the deletions to a given timestamp range. message DeleteFromColumn { // The name of the family from which cells should be deleted. // Must match [-_.a-zA-Z0-9]+ string family_name = 1; // The qualifier of the column from which cells should be deleted. // Can be any byte string, including the empty string. bytes column_qualifier = 2; // The range of timestamps within which cells should be deleted. TimestampRange time_range = 3; } // A Mutation which deletes all cells from the specified column family. message DeleteFromFamily { // The name of the family from which cells should be deleted. // Must match [-_.a-zA-Z0-9]+ string family_name = 1; } // A Mutation which deletes all cells from the containing row. message DeleteFromRow { } oneof mutation { // Set a cell's value. SetCell set_cell = 1; // Deletes cells from a column. DeleteFromColumn delete_from_column = 2; // Deletes cells from a column family. DeleteFromFamily delete_from_family = 3; // Deletes cells from the entire row. DeleteFromRow delete_from_row = 4; } } // Specifies an atomic read/modify/write operation on the latest value of the // specified column. message ReadModifyWriteRule { // The name of the family to which the read/modify/write should be applied. // Must match [-_.a-zA-Z0-9]+ string family_name = 1; // The qualifier of the column to which the read/modify/write should be // applied. // Can be any byte string, including the empty string. bytes column_qualifier = 2; oneof rule { // Rule specifying that "append_value" be appended to the existing value. // If the targeted cell is unset, it will be treated as containing the // empty string. bytes append_value = 3; // Rule specifying that "increment_amount" be added to the existing value. // If the targeted cell is unset, it will be treated as containing a zero. // Otherwise, the targeted cell must contain an 8-byte value (interpreted // as a 64-bit big-endian signed integer), or the entire request will fail. int64 increment_amount = 4; } } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/empty/empty.proto // DO NOT EDIT! /* Package google_protobuf is a generated protocol buffer package. It is generated from these files: google.golang.org/cloud/bigtable/internal/empty/empty.proto It has these top-level messages: Empty */ package google_protobuf import proto "github.com/golang/protobuf/proto" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal // A generic empty message that you can re-use to avoid defining duplicated // empty messages in your APIs. A typical example is to use it as the request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } // type Empty struct { } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func init() { } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/empty/empty.proto ================================================ // Copyright (c) 2015, Google Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.protobuf; option java_multiple_files = true; option java_outer_classname = "EmptyProto"; option java_package = "com.google.protobuf"; // A generic empty message that you can re-use to avoid defining duplicated // empty messages in your APIs. A typical example is to use it as the request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } // message Empty { } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/regen.sh ================================================ #!/bin/bash -e # # This script rebuilds the generated code for the protocol buffers. # To run this you will need protoc and goprotobuf installed; # see https://github.com/golang/protobuf for instructions. # You also need Go and Git installed. PKG=google.golang.org/cloud/bigtable UPSTREAM=https://github.com/GoogleCloudPlatform/cloud-bigtable-client UPSTREAM_SUBDIR=bigtable-protos/src/main/proto function die() { echo 1>&2 $* exit 1 } # Sanity check that the right tools are accessible. for tool in go git protoc protoc-gen-go; do q=$(which $tool) || die "didn't find $tool" echo 1>&2 "$tool: $q" done tmpdir=$(mktemp -d -t regen-cbt.XXXXXX) trap 'rm -rf $tmpdir' EXIT echo -n 1>&2 "finding package dir... " pkgdir=$(go list -f '{{.Dir}}' $PKG) echo 1>&2 $pkgdir base=$(echo $pkgdir | sed "s,/$PKG\$,,") echo 1>&2 "base: $base" cd $base echo 1>&2 "fetching latest protos... " git clone -q $UPSTREAM $tmpdir # Pass 1: build mapping from upstream filename to our filename. declare -A filename_map for f in $(cd $PKG && find internal -name '*.proto'); do echo -n 1>&2 "looking for latest version of $f... " up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f)) echo 1>&2 $up if [ $(echo $up | wc -w) != "1" ]; then die "not exactly one match" fi filename_map[$up]=$f done # Pass 2: build sed script for fixing imports. import_fixes=$tmpdir/fix_imports.sed for up in "${!filename_map[@]}"; do f=${filename_map[$up]} echo >>$import_fixes "s,\"$up\",\"$PKG/$f\"," done cat $import_fixes | sed 's,^,### ,' 1>&2 # Pass 3: copy files, making necessary adjustments. for up in "${!filename_map[@]}"; do f=${filename_map[$up]} cat $tmpdir/$UPSTREAM_SUBDIR/$up | # Adjust proto imports. sed -f $import_fixes | # Drop the UndeleteCluster RPC method. It returns a google.longrunning.Operation. sed '/^ rpc UndeleteCluster(/,/^ }$/d' | # Drop annotations, long-running operations and timestamps. They aren't supported (yet). sed '/"google\/longrunning\/operations.proto"/d' | sed '/google.longrunning.Operation/d' | sed '/"google\/protobuf\/timestamp.proto"/d' | sed '/google\.protobuf\.Timestamp/d' | sed '/"google\/api\/annotations.proto"/d' | sed '/option.*google\.api\.http.*{.*};$/d' | cat > $PKG/$f done # Run protoc once per package. 
for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do echo 1>&2 "* $dir" protoc --go_out=plugins=grpc:. $dir/*.proto done echo 1>&2 "All OK" ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto // DO NOT EDIT! package google_bigtable_v1 import proto "github.com/golang/protobuf/proto" import google_bigtable_v11 "google.golang.org/cloud/bigtable/internal/data_proto" import google_protobuf "google.golang.org/cloud/bigtable/internal/empty" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal func init() { } // Client API for BigtableService service type BigtableServiceClient interface { // Streams back the contents of all requested rows, optionally applying // the same Reader filter to each. Depending on their size, rows may be // broken up across multiple responses, but atomicity of each row will still // be preserved. ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) // Returns a sample of row keys in the table. The returned row keys will // delimit contiguous sections of the table of approximately equal size, // which can be used to break up the data for distributed tasks like // mapreduces. SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) // Mutates a row atomically. Cells already present in the row are left // unchanged unless explicitly changed by 'mutation'. MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) // Mutates a row atomically based on the output of a predicate Reader filter. CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) // Modifies a row atomically, reading the latest existing timestamp/value from // the specified columns and writing a new value at // max(existing timestamp, current server time) based on pre-defined // read/modify/write rules. Returns the new contents of all modified cells. ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error) } type bigtableServiceClient struct { cc *grpc.ClientConn } func NewBigtableServiceClient(cc *grpc.ClientConn) BigtableServiceClient { return &bigtableServiceClient{cc} } func (c *bigtableServiceClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) { stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[0], c.cc, "/google.bigtable.v1.BigtableService/ReadRows", opts...) 
if err != nil { return nil, err } x := &bigtableServiceReadRowsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type BigtableService_ReadRowsClient interface { Recv() (*ReadRowsResponse, error) grpc.ClientStream } type bigtableServiceReadRowsClient struct { grpc.ClientStream } func (x *bigtableServiceReadRowsClient) Recv() (*ReadRowsResponse, error) { m := new(ReadRowsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *bigtableServiceClient) SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) { stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[1], c.cc, "/google.bigtable.v1.BigtableService/SampleRowKeys", opts...) if err != nil { return nil, err } x := &bigtableServiceSampleRowKeysClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type BigtableService_SampleRowKeysClient interface { Recv() (*SampleRowKeysResponse, error) grpc.ClientStream } type bigtableServiceSampleRowKeysClient struct { grpc.ClientStream } func (x *bigtableServiceSampleRowKeysClient) Recv() (*SampleRowKeysResponse, error) { m := new(SampleRowKeysResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { out := new(google_protobuf.Empty) err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRow", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableServiceClient) CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) { out := new(CheckAndMutateRowResponse) err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/CheckAndMutateRow", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableServiceClient) ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error) { out := new(google_bigtable_v11.Row) err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/ReadModifyWriteRow", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for BigtableService service type BigtableServiceServer interface { // Streams back the contents of all requested rows, optionally applying // the same Reader filter to each. Depending on their size, rows may be // broken up across multiple responses, but atomicity of each row will still // be preserved. ReadRows(*ReadRowsRequest, BigtableService_ReadRowsServer) error // Returns a sample of row keys in the table. The returned row keys will // delimit contiguous sections of the table of approximately equal size, // which can be used to break up the data for distributed tasks like // mapreduces. SampleRowKeys(*SampleRowKeysRequest, BigtableService_SampleRowKeysServer) error // Mutates a row atomically. Cells already present in the row are left // unchanged unless explicitly changed by 'mutation'. MutateRow(context.Context, *MutateRowRequest) (*google_protobuf.Empty, error) // Mutates a row atomically based on the output of a predicate Reader filter. 
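A minimal sketch (not part of the generated code) of driving the client API above: dial a connection, create a BigtableServiceClient, and drain a ReadRows stream until io.EOF. The address, table path and insecure transport are placeholders, and the standard io package would have to be imported alongside the imports at the top of this file.

func exampleReadRows(ctx context.Context) error {
	conn, err := grpc.Dial("bigtable.googleapis.com:443", grpc.WithInsecure()) // placeholder transport
	if err != nil {
		return err
	}
	defer conn.Close()
	client := NewBigtableServiceClient(conn)
	stream, err := client.ReadRows(ctx, &ReadRowsRequest{TableName: "projects/p/zones/z/clusters/c/tables/t"})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // the server has finished streaming
		}
		if err != nil {
			return err
		}
		_ = resp.RowKey // each response carries chunks belonging to this row key
	}
}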
CheckAndMutateRow(context.Context, *CheckAndMutateRowRequest) (*CheckAndMutateRowResponse, error) // Modifies a row atomically, reading the latest existing timestamp/value from // the specified columns and writing a new value at // max(existing timestamp, current server time) based on pre-defined // read/modify/write rules. Returns the new contents of all modified cells. ReadModifyWriteRow(context.Context, *ReadModifyWriteRowRequest) (*google_bigtable_v11.Row, error) } func RegisterBigtableServiceServer(s *grpc.Server, srv BigtableServiceServer) { s.RegisterService(&_BigtableService_serviceDesc, srv) } func _BigtableService_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ReadRowsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(BigtableServiceServer).ReadRows(m, &bigtableServiceReadRowsServer{stream}) } type BigtableService_ReadRowsServer interface { Send(*ReadRowsResponse) error grpc.ServerStream } type bigtableServiceReadRowsServer struct { grpc.ServerStream } func (x *bigtableServiceReadRowsServer) Send(m *ReadRowsResponse) error { return x.ServerStream.SendMsg(m) } func _BigtableService_SampleRowKeys_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SampleRowKeysRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(BigtableServiceServer).SampleRowKeys(m, &bigtableServiceSampleRowKeysServer{stream}) } type BigtableService_SampleRowKeysServer interface { Send(*SampleRowKeysResponse) error grpc.ServerStream } type bigtableServiceSampleRowKeysServer struct { grpc.ServerStream } func (x *bigtableServiceSampleRowKeysServer) Send(m *SampleRowKeysResponse) error { return x.ServerStream.SendMsg(m) } func _BigtableService_MutateRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(MutateRowRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableServiceServer).MutateRow(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableService_CheckAndMutateRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(CheckAndMutateRowRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableServiceServer).CheckAndMutateRow(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableService_ReadModifyWriteRow_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(ReadModifyWriteRowRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableServiceServer).ReadModifyWriteRow(ctx, in) if err != nil { return nil, err } return out, nil } var _BigtableService_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.bigtable.v1.BigtableService", HandlerType: (*BigtableServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "MutateRow", Handler: _BigtableService_MutateRow_Handler, }, { MethodName: "CheckAndMutateRow", Handler: _BigtableService_CheckAndMutateRow_Handler, }, { MethodName: "ReadModifyWriteRow", Handler: _BigtableService_ReadModifyWriteRow_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "ReadRows", Handler: _BigtableService_ReadRows_Handler, ServerStreams: true, }, { StreamName: "SampleRowKeys", Handler: _BigtableService_SampleRowKeys_Handler, ServerStreams: true, }, }, } ================================================ FILE: 
vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.v1; import "google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto"; import "google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto"; import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; option java_generic_services = true; option java_multiple_files = true; option java_outer_classname = "BigtableServicesProto"; option java_package = "com.google.bigtable.v1"; // Service for reading from and writing to existing Bigtables. service BigtableService { // Streams back the contents of all requested rows, optionally applying // the same Reader filter to each. Depending on their size, rows may be // broken up across multiple responses, but atomicity of each row will still // be preserved. rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { } // Returns a sample of row keys in the table. The returned row keys will // delimit contiguous sections of the table of approximately equal size, // which can be used to break up the data for distributed tasks like // mapreduces. rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { } // Mutates a row atomically. Cells already present in the row are left // unchanged unless explicitly changed by 'mutation'. rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { } // Mutates a row atomically based on the output of a predicate Reader filter. rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { } // Modifies a row atomically, reading the latest existing timestamp/value from // the specified columns and writing a new value at // max(existing timestamp, current server time) based on pre-defined // read/modify/write rules. Returns the new contents of all modified cells. rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { } } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto // DO NOT EDIT! /* Package google_bigtable_v1 is a generated protocol buffer package. 
It is generated from these files: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto It has these top-level messages: ReadRowsRequest ReadRowsResponse SampleRowKeysRequest SampleRowKeysResponse MutateRowRequest CheckAndMutateRowRequest CheckAndMutateRowResponse ReadModifyWriteRowRequest */ package google_bigtable_v1 import proto "github.com/golang/protobuf/proto" import google_bigtable_v11 "google.golang.org/cloud/bigtable/internal/data_proto" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal // Request message for BigtableServer.ReadRows. type ReadRowsRequest struct { // The unique name of the table from which to read. TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` // The key of a single row from which to read. RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` // A range of rows from which to read. RowRange *google_bigtable_v11.RowRange `protobuf:"bytes,3,opt,name=row_range" json:"row_range,omitempty"` // The filter to apply to the contents of the specified row(s), in the // deprecated string format. If unset, reads the most recent value from all // readable columns. DEPRECATEDStringFilter string `protobuf:"bytes,4,opt,name=DEPRECATED_string_filter" json:"DEPRECATED_string_filter,omitempty"` // The filter to apply to the contents of the specified row(s). If unset, // reads the entire table. Filter *google_bigtable_v11.RowFilter `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` // By default, rows are read sequentially, producing results which are // guaranteed to arrive in increasing row order. Setting // "allow_row_interleaving" to true allows multiple rows to be interleaved in // the response stream, which increases throughput but breaks this guarantee, // and may force the client to use more memory to buffer partially-received // rows. AllowRowInterleaving bool `protobuf:"varint,6,opt,name=allow_row_interleaving" json:"allow_row_interleaving,omitempty"` // The read will terminate after committing to N rows' worth of results. The // default (zero) is to return all results. // Note that if "allow_row_interleaving" is set to true, partial results may // be returned for more than N rows. However, only N "commit_row" chunks will // be sent. NumRowsLimit int64 `protobuf:"varint,7,opt,name=num_rows_limit" json:"num_rows_limit,omitempty"` } func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} } func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) } func (*ReadRowsRequest) ProtoMessage() {} func (m *ReadRowsRequest) GetRowRange() *google_bigtable_v11.RowRange { if m != nil { return m.RowRange } return nil } func (m *ReadRowsRequest) GetFilter() *google_bigtable_v11.RowFilter { if m != nil { return m.Filter } return nil } // Response message for BigtableService.ReadRows. type ReadRowsResponse struct { // The key of the row for which we're receiving data. // Results will be received in increasing row key order, unless // "allow_row_interleaving" was specified in the request. RowKey []byte `protobuf:"bytes,1,opt,name=row_key,proto3" json:"row_key,omitempty"` // One or more chunks of the row specified by "row_key". 
Chunks []*ReadRowsResponse_Chunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"` } func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} } func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) } func (*ReadRowsResponse) ProtoMessage() {} func (m *ReadRowsResponse) GetChunks() []*ReadRowsResponse_Chunk { if m != nil { return m.Chunks } return nil } // Specifies a piece of a row's contents returned as part of the read // response stream. type ReadRowsResponse_Chunk struct { // A subset of the data from a particular row. As long as no "reset_row" // is received in between, multiple "row_contents" from the same row are // from the same atomic view of that row, and will be received in the // expected family/column/timestamp order. RowContents *google_bigtable_v11.Family `protobuf:"bytes,1,opt,name=row_contents" json:"row_contents,omitempty"` // Indicates that the client should drop all previous chunks for // "row_key", as it will be re-read from the beginning. ResetRow bool `protobuf:"varint,2,opt,name=reset_row" json:"reset_row,omitempty"` // Indicates that the client can safely process all previous chunks for // "row_key", as its data has been fully read. CommitRow bool `protobuf:"varint,3,opt,name=commit_row" json:"commit_row,omitempty"` } func (m *ReadRowsResponse_Chunk) Reset() { *m = ReadRowsResponse_Chunk{} } func (m *ReadRowsResponse_Chunk) String() string { return proto.CompactTextString(m) } func (*ReadRowsResponse_Chunk) ProtoMessage() {} func (m *ReadRowsResponse_Chunk) GetRowContents() *google_bigtable_v11.Family { if m != nil { return m.RowContents } return nil } // Request message for BigtableService.SampleRowKeys. type SampleRowKeysRequest struct { // The unique name of the table from which to sample row keys. TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` } func (m *SampleRowKeysRequest) Reset() { *m = SampleRowKeysRequest{} } func (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) } func (*SampleRowKeysRequest) ProtoMessage() {} // Response message for BigtableService.SampleRowKeys. type SampleRowKeysResponse struct { // Sorted streamed sequence of sample row keys in the table. The table might // have contents before the first row key in the list and after the last one, // but a key containing the empty string indicates "end of table" and will be // the last response given, if present. // Note that row keys in this list may not have ever been written to or read // from, and users should therefore not make any assumptions about the row key // structure that are specific to their use case. RowKey []byte `protobuf:"bytes,1,opt,name=row_key,proto3" json:"row_key,omitempty"` // Approximate total storage space used by all rows in the table which precede // "row_key". Buffering the contents of all rows between two subsequent // samples would require space roughly equal to the difference in their // "offset_bytes" fields. OffsetBytes int64 `protobuf:"varint,2,opt,name=offset_bytes" json:"offset_bytes,omitempty"` } func (m *SampleRowKeysResponse) Reset() { *m = SampleRowKeysResponse{} } func (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) } func (*SampleRowKeysResponse) ProtoMessage() {} // Request message for BigtableService.MutateRow. type MutateRowRequest struct { // The unique name of the table to which the mutation should be applied. 
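A minimal sketch (not part of the generated code) of the chunk semantics described above: buffered row contents are thrown away when ResetRow arrives and handed to a caller-supplied flush function once CommitRow marks the row as complete.

func applyChunks(chunks []*ReadRowsResponse_Chunk, flush func([]*google_bigtable_v11.Family)) {
	var buffered []*google_bigtable_v11.Family
	for _, c := range chunks {
		switch {
		case c.ResetRow:
			buffered = nil // the row will be re-sent from the beginning
		case c.CommitRow:
			flush(buffered) // everything buffered so far can now be processed safely
			buffered = nil
		case c.RowContents != nil:
			buffered = append(buffered, c.RowContents)
		}
	}
}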
TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` // The key of the row to which the mutation should be applied. RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` // Changes to be atomically applied to the specified row. Entries are applied // in order, meaning that earlier mutations can be masked by later ones. Mutations []*google_bigtable_v11.Mutation `protobuf:"bytes,3,rep,name=mutations" json:"mutations,omitempty"` } func (m *MutateRowRequest) Reset() { *m = MutateRowRequest{} } func (m *MutateRowRequest) String() string { return proto.CompactTextString(m) } func (*MutateRowRequest) ProtoMessage() {} func (m *MutateRowRequest) GetMutations() []*google_bigtable_v11.Mutation { if m != nil { return m.Mutations } return nil } // Request message for BigtableService.CheckAndMutateRowRequest type CheckAndMutateRowRequest struct { // The unique name of the table to which the conditional mutation should be // applied. TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` // The key of the row to which the conditional mutation should be applied. RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` // Changes to be atomically applied to the specified row if "predicate_filter" // yields at least one cell when applied to "row_key". Entries are applied in // order, meaning that earlier mutations can be masked by later ones. // Must contain at least one entry if "false_mutations" is empty. TrueMutations []*google_bigtable_v11.Mutation `protobuf:"bytes,4,rep,name=true_mutations" json:"true_mutations,omitempty"` // Changes to be atomically applied to the specified row if "predicate_filter" // does not yield any cells when applied to "row_key". Entries are applied in // order, meaning that earlier mutations can be masked by later ones. // Must contain at least one entry if "true_mutations" is empty. FalseMutations []*google_bigtable_v11.Mutation `protobuf:"bytes,5,rep,name=false_mutations" json:"false_mutations,omitempty"` // The filter to be applied to the contents of the specified row, in the // deprecated string format. Depending on whether or not any results are // yielded, either "true_mutations" or "false_mutations" will be executed. If // unset, checks that the row contains any values at all. DEPRECATEDField_3 string `protobuf:"bytes,3,opt,name=DEPRECATED_field_3" json:"DEPRECATED_field_3,omitempty"` // The filter to be applied to the contents of the specified row. Depending // on whether or not any results are yielded, either "true_mutations" or // "false_mutations" will be executed. If unset, checks that the row contains // any values at all. 
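A minimal sketch (not part of the generated code) of sending the MutateRowRequest defined above through the BigtableServiceClient from bigtable_service.pb.go. The table path and row key are placeholders, and the context and gRPC imports are assumed as in the other sketches.

func exampleMutateRow(ctx context.Context, client BigtableServiceClient, muts []*google_bigtable_v11.Mutation) error {
	_, err := client.MutateRow(ctx, &MutateRowRequest{
		TableName: "projects/p/zones/z/clusters/c/tables/t",
		RowKey:    []byte("user#1234"),
		Mutations: muts, // applied in order; later entries can mask earlier ones
	})
	return err
}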
PredicateFilter *google_bigtable_v11.RowFilter `protobuf:"bytes,6,opt,name=predicate_filter" json:"predicate_filter,omitempty"` } func (m *CheckAndMutateRowRequest) Reset() { *m = CheckAndMutateRowRequest{} } func (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) } func (*CheckAndMutateRowRequest) ProtoMessage() {} func (m *CheckAndMutateRowRequest) GetTrueMutations() []*google_bigtable_v11.Mutation { if m != nil { return m.TrueMutations } return nil } func (m *CheckAndMutateRowRequest) GetFalseMutations() []*google_bigtable_v11.Mutation { if m != nil { return m.FalseMutations } return nil } func (m *CheckAndMutateRowRequest) GetPredicateFilter() *google_bigtable_v11.RowFilter { if m != nil { return m.PredicateFilter } return nil } // Response message for BigtableService.CheckAndMutateRowRequest. type CheckAndMutateRowResponse struct { // Whether or not the request's "predicate_filter" yielded any results for // the specified row. PredicateMatched bool `protobuf:"varint,1,opt,name=predicate_matched" json:"predicate_matched,omitempty"` } func (m *CheckAndMutateRowResponse) Reset() { *m = CheckAndMutateRowResponse{} } func (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) } func (*CheckAndMutateRowResponse) ProtoMessage() {} // Request message for BigtableService.ReadModifyWriteRowRequest. type ReadModifyWriteRowRequest struct { // The unique name of the table to which the read/modify/write rules should be // applied. TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` // The key of the row to which the read/modify/write rules should be applied. RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` // Rules specifying how the specified row's contents are to be transformed // into writes. Entries are applied in order, meaning that earlier rules will // affect the results of later ones. Rules []*google_bigtable_v11.ReadModifyWriteRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"` } func (m *ReadModifyWriteRowRequest) Reset() { *m = ReadModifyWriteRowRequest{} } func (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) } func (*ReadModifyWriteRowRequest) ProtoMessage() {} func (m *ReadModifyWriteRowRequest) GetRules() []*google_bigtable_v11.ReadModifyWriteRule { if m != nil { return m.Rules } return nil } func init() { } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.v1; import "google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto"; option java_multiple_files = true; option java_outer_classname = "BigtableServiceMessagesProto"; option java_package = "com.google.bigtable.v1"; // Request message for BigtableServer.ReadRows. 
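A minimal sketch (not part of the generated code) of an atomic counter built on ReadModifyWriteRow, combining the request type above with a ReadModifyWriteRule from bigtable_data.pb.go. The table path, family and qualifier are placeholders; the targeted cell must already hold an 8-byte big-endian signed integer (or be unset).

func exampleIncrement(ctx context.Context, client BigtableServiceClient) (*google_bigtable_v11.Row, error) {
	return client.ReadModifyWriteRow(ctx, &ReadModifyWriteRowRequest{
		TableName: "projects/p/zones/z/clusters/c/tables/t",
		RowKey:    []byte("counter#page-views"),
		Rules: []*google_bigtable_v11.ReadModifyWriteRule{{
			FamilyName:      "stats",
			ColumnQualifier: []byte("total"),
			IncrementAmount: 1, // added to the existing 64-bit value
		}},
	})
}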
message ReadRowsRequest { // The unique name of the table from which to read. string table_name = 1; oneof target { // The key of a single row from which to read. bytes row_key = 2; // A range of rows from which to read. RowRange row_range = 3; } // The filter to apply to the contents of the specified row(s), in the // deprecated string format. If unset, reads the most recent value from all // readable columns. string DEPRECATED_string_filter = 4; // The filter to apply to the contents of the specified row(s). If unset, // reads the entire table. RowFilter filter = 5; // By default, rows are read sequentially, producing results which are // guaranteed to arrive in increasing row order. Setting // "allow_row_interleaving" to true allows multiple rows to be interleaved in // the response stream, which increases throughput but breaks this guarantee, // and may force the client to use more memory to buffer partially-received // rows. bool allow_row_interleaving = 6; // The read will terminate after committing to N rows' worth of results. The // default (zero) is to return all results. // Note that if "allow_row_interleaving" is set to true, partial results may // be returned for more than N rows. However, only N "commit_row" chunks will // be sent. int64 num_rows_limit = 7; } // Response message for BigtableService.ReadRows. message ReadRowsResponse { // Specifies a piece of a row's contents returned as part of the read // response stream. message Chunk { oneof chunk { // A subset of the data from a particular row. As long as no "reset_row" // is received in between, multiple "row_contents" from the same row are // from the same atomic view of that row, and will be received in the // expected family/column/timestamp order. Family row_contents = 1; // Indicates that the client should drop all previous chunks for // "row_key", as it will be re-read from the beginning. bool reset_row = 2; // Indicates that the client can safely process all previous chunks for // "row_key", as its data has been fully read. bool commit_row = 3; } } // The key of the row for which we're receiving data. // Results will be received in increasing row key order, unless // "allow_row_interleaving" was specified in the request. bytes row_key = 1; // One or more chunks of the row specified by "row_key". repeated Chunk chunks = 2; } // Request message for BigtableService.SampleRowKeys. message SampleRowKeysRequest { // The unique name of the table from which to sample row keys. string table_name = 1; } // Response message for BigtableService.SampleRowKeys. message SampleRowKeysResponse { // Sorted streamed sequence of sample row keys in the table. The table might // have contents before the first row key in the list and after the last one, // but a key containing the empty string indicates "end of table" and will be // the last response given, if present. // Note that row keys in this list may not have ever been written to or read // from, and users should therefore not make any assumptions about the row key // structure that are specific to their use case. bytes row_key = 1; // Approximate total storage space used by all rows in the table which precede // "row_key". Buffering the contents of all rows between two subsequent // samples would require space roughly equal to the difference in their // "offset_bytes" fields. int64 offset_bytes = 2; } // Request message for BigtableService.MutateRow. message MutateRowRequest { // The unique name of the table to which the mutation should be applied. 
string table_name = 1; // The key of the row to which the mutation should be applied. bytes row_key = 2; // Changes to be atomically applied to the specified row. Entries are applied // in order, meaning that earlier mutations can be masked by later ones. repeated Mutation mutations = 3; } // Request message for BigtableService.CheckAndMutateRowRequest message CheckAndMutateRowRequest { // The unique name of the table to which the conditional mutation should be // applied. string table_name = 1; // The key of the row to which the conditional mutation should be applied. bytes row_key = 2; // Changes to be atomically applied to the specified row if "predicate_filter" // yields at least one cell when applied to "row_key". Entries are applied in // order, meaning that earlier mutations can be masked by later ones. // Must contain at least one entry if "false_mutations" is empty. repeated Mutation true_mutations = 4; // Changes to be atomically applied to the specified row if "predicate_filter" // does not yield any cells when applied to "row_key". Entries are applied in // order, meaning that earlier mutations can be masked by later ones. // Must contain at least one entry if "true_mutations" is empty. repeated Mutation false_mutations = 5; // The filter to be applied to the contents of the specified row, in the // deprecated string format. Depending on whether or not any results are // yielded, either "true_mutations" or "false_mutations" will be executed. If // unset, checks that the row contains any values at all. string DEPRECATED_field_3 = 3; // The filter to be applied to the contents of the specified row. Depending // on whether or not any results are yielded, either "true_mutations" or // "false_mutations" will be executed. If unset, checks that the row contains // any values at all. RowFilter predicate_filter = 6; } // Response message for BigtableService.CheckAndMutateRowRequest. message CheckAndMutateRowResponse { // Whether or not the request's "predicate_filter" yielded any results for // the specified row. bool predicate_matched = 1; } // Request message for BigtableService.ReadModifyWriteRowRequest. message ReadModifyWriteRowRequest { // The unique name of the table to which the read/modify/write rules should be // applied. string table_name = 1; // The key of the row to which the read/modify/write rules should be applied. bytes row_key = 2; // Rules specifying how the specified row's contents are to be transformed // into writes. Entries are applied in order, meaning that earlier rules will // affect the results of later ones. repeated ReadModifyWriteRule rules = 3; } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto // DO NOT EDIT! /* Package google_bigtable_admin_table_v1 is a generated protocol buffer package. It is generated from these files: google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto It has these top-level messages: Table ColumnFamily */ package google_bigtable_admin_table_v1 import proto "github.com/golang/protobuf/proto" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal type Table_TimestampGranularity int32 const ( Table_MILLIS Table_TimestampGranularity = 0 ) var Table_TimestampGranularity_name = map[int32]string{ 0: "MILLIS", } var Table_TimestampGranularity_value = map[string]int32{ "MILLIS": 0, } func (x Table_TimestampGranularity) String() string { return proto.EnumName(Table_TimestampGranularity_name, int32(x)) } // A collection of user data indexed by row, column, and timestamp. // Each table is served using the resources of its parent cluster. type Table struct { // A unique identifier of the form // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The column families configured for this table, mapped by column family id. ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in // this table. Timestamps not matching the granularity will be rejected. // Cannot be changed once the table is created. Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.table.v1.Table_TimestampGranularity" json:"granularity,omitempty"` } func (m *Table) Reset() { *m = Table{} } func (m *Table) String() string { return proto.CompactTextString(m) } func (*Table) ProtoMessage() {} func (m *Table) GetColumnFamilies() map[string]*ColumnFamily { if m != nil { return m.ColumnFamilies } return nil } // A set of columns within a table which share a common configuration. type ColumnFamily struct { // A unique identifier of the form /families/[-_.a-zA-Z0-9]+ // The last segment is the same as the "name" field in // google.bigtable.v1.Family. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Garbage collection expression specified by the following grammar: // GC = EXPR // | "" ; // EXPR = EXPR, "||", EXPR (* lowest precedence *) // | EXPR, "&&", EXPR // | "(", EXPR, ")" (* highest precedence *) // | PROP ; // PROP = "version() >", NUM32 // | "age() >", NUM64, [ UNIT ] ; // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) // GC expressions can be up to 500 characters in length // // The different types of PROP are defined as follows: // version() - cell index, counting from most recent and starting at 1 // age() - age of the cell (current time minus cell timestamp) // // Example: "version() > 3 || (age() > 3d && version() > 1)" // drop cells beyond the most recent three, and drop cells older than three // days unless they're the most recent cell in the row/column // // Garbage collection executes opportunistically in the background, and so // it's possible for reads to return a cell even if it matches the active GC // expression for its family. 
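A minimal sketch (not part of the generated code) of the admin-side Table value defined above, mapping one column family id to its ColumnFamily and using millisecond timestamp granularity. The identifier strings are placeholders that follow the path patterns given in the comments.

var exampleTable = &Table{
	Name:        "projects/p/zones/z/clusters/c/tables/t",
	Granularity: Table_MILLIS,
	ColumnFamilies: map[string]*ColumnFamily{
		"cf1": {Name: "projects/p/zones/z/clusters/c/tables/t/families/cf1"},
	},
}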
GcExpression string `protobuf:"bytes,2,opt,name=gc_expression" json:"gc_expression,omitempty"` } func (m *ColumnFamily) Reset() { *m = ColumnFamily{} } func (m *ColumnFamily) String() string { return proto.CompactTextString(m) } func (*ColumnFamily) ProtoMessage() {} func init() { proto.RegisterEnum("google.bigtable.admin.table.v1.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value) } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.admin.table.v1; option java_multiple_files = true; option java_outer_classname = "BigtableTableDataProto"; option java_package = "com.google.bigtable.admin.table.v1"; // A collection of user data indexed by row, column, and timestamp. // Each table is served using the resources of its parent cluster. message Table { enum TimestampGranularity { MILLIS = 0; } // A unique identifier of the form // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* string name = 1; // If this Table is in the process of being created, the Operation used to // track its progress. As long as this operation is present, the Table will // not accept any Table Admin or Read/Write requests. // The column families configured for this table, mapped by column family id. map<string, ColumnFamily> column_families = 3; // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in // this table. Timestamps not matching the granularity will be rejected. // Cannot be changed once the table is created. TimestampGranularity granularity = 4; } // A set of columns within a table which share a common configuration. message ColumnFamily { // A unique identifier of the form /families/[-_.a-zA-Z0-9]+ // The last segment is the same as the "name" field in // google.bigtable.v1.Family.
string name = 1; // Garbage collection expression specified by the following grammar: // GC = EXPR // | "" ; // EXPR = EXPR, "||", EXPR (* lowest precedence *) // | EXPR, "&&", EXPR // | "(", EXPR, ")" (* highest precedence *) // | PROP ; // PROP = "version() >", NUM32 // | "age() >", NUM64, [ UNIT ] ; // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) // GC expressions can be up to 500 characters in length // // The different types of PROP are defined as follows: // version() - cell index, counting from most recent and starting at 1 // age() - age of the cell (current time minus cell timestamp) // // Example: "version() > 3 || (age() > 3d && version() > 1)" // drop cells beyond the most recent three, and drop cells older than three // days unless they're the most recent cell in the row/column // // Garbage collection executes opportunistically in the background, and so // it's possible for reads to return a cell even if it matches the active GC // expression for its family. string gc_expression = 2; } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go ================================================ // Code generated by protoc-gen-go. // source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto // DO NOT EDIT! package google_bigtable_admin_table_v1 import proto "github.com/golang/protobuf/proto" import google_bigtable_admin_table_v11 "google.golang.org/cloud/bigtable/internal/table_data_proto" import google_protobuf "google.golang.org/cloud/bigtable/internal/empty" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal func init() { } // Client API for BigtableTableService service type BigtableTableServiceClient interface { // Creates a new table, to be served from a specified cluster. // The table can be created with a full set of initial column families, // specified in the request. CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) // Lists the names of all tables served from a specified cluster. ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) // Gets the schema of the specified table, including its column families. GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) // Permanently deletes a specified table and all of its data. DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) // Changes the name of a specified table. // Cannot be used to move tables between clusters, zones, or projects. RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) // Creates a new column family within a specified table. CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) // Changes the configuration of a specified column family. 
UpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) // Permanently deletes a specified column family and all of its data. DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) } type bigtableTableServiceClient struct { cc *grpc.ClientConn } func NewBigtableTableServiceClient(cc *grpc.ClientConn) BigtableTableServiceClient { return &bigtableTableServiceClient{cc} } func (c *bigtableTableServiceClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) { out := new(google_bigtable_admin_table_v11.Table) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateTable", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableTableServiceClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) { out := new(ListTablesResponse) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/ListTables", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableTableServiceClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) { out := new(google_bigtable_admin_table_v11.Table) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/GetTable", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableTableServiceClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { out := new(google_protobuf.Empty) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { out := new(google_protobuf.Empty) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/RenameTable", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableTableServiceClient) CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) { out := new(google_bigtable_admin_table_v11.ColumnFamily) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *bigtableTableServiceClient) UpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) { out := new(google_bigtable_admin_table_v11.ColumnFamily) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } func (c *bigtableTableServiceClient) DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { out := new(google_protobuf.Empty) err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for BigtableTableService service type BigtableTableServiceServer interface { // Creates a new table, to be served from a specified cluster. // The table can be created with a full set of initial column families, // specified in the request. CreateTable(context.Context, *CreateTableRequest) (*google_bigtable_admin_table_v11.Table, error) // Lists the names of all tables served from a specified cluster. ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) // Gets the schema of the specified table, including its column families. GetTable(context.Context, *GetTableRequest) (*google_bigtable_admin_table_v11.Table, error) // Permanently deletes a specified table and all of its data. DeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf.Empty, error) // Changes the name of a specified table. // Cannot be used to move tables between clusters, zones, or projects. RenameTable(context.Context, *RenameTableRequest) (*google_protobuf.Empty, error) // Creates a new column family within a specified table. CreateColumnFamily(context.Context, *CreateColumnFamilyRequest) (*google_bigtable_admin_table_v11.ColumnFamily, error) // Changes the configuration of a specified column family. UpdateColumnFamily(context.Context, *google_bigtable_admin_table_v11.ColumnFamily) (*google_bigtable_admin_table_v11.ColumnFamily, error) // Permanently deletes a specified column family and all of its data. 
DeleteColumnFamily(context.Context, *DeleteColumnFamilyRequest) (*google_protobuf.Empty, error) } func RegisterBigtableTableServiceServer(s *grpc.Server, srv BigtableTableServiceServer) { s.RegisterService(&_BigtableTableService_serviceDesc, srv) } func _BigtableTableService_CreateTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(CreateTableRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).CreateTable(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableTableService_ListTables_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(ListTablesRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).ListTables(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableTableService_GetTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(GetTableRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).GetTable(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableTableService_DeleteTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(DeleteTableRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).DeleteTable(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableTableService_RenameTable_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(RenameTableRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).RenameTable(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableTableService_CreateColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(CreateColumnFamilyRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).CreateColumnFamily(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableTableService_UpdateColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(google_bigtable_admin_table_v11.ColumnFamily) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).UpdateColumnFamily(ctx, in) if err != nil { return nil, err } return out, nil } func _BigtableTableService_DeleteColumnFamily_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(DeleteColumnFamilyRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(BigtableTableServiceServer).DeleteColumnFamily(ctx, in) if err != nil { return nil, err } return out, nil } var _BigtableTableService_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.bigtable.admin.table.v1.BigtableTableService", HandlerType: (*BigtableTableServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CreateTable", Handler: _BigtableTableService_CreateTable_Handler, }, { MethodName: "ListTables", Handler: _BigtableTableService_ListTables_Handler, }, { MethodName: "GetTable", Handler: 
_BigtableTableService_GetTable_Handler, }, { MethodName: "DeleteTable", Handler: _BigtableTableService_DeleteTable_Handler, }, { MethodName: "RenameTable", Handler: _BigtableTableService_RenameTable_Handler, }, { MethodName: "CreateColumnFamily", Handler: _BigtableTableService_CreateColumnFamily_Handler, }, { MethodName: "UpdateColumnFamily", Handler: _BigtableTableService_UpdateColumnFamily_Handler, }, { MethodName: "DeleteColumnFamily", Handler: _BigtableTableService_DeleteColumnFamily_Handler, }, }, Streams: []grpc.StreamDesc{}, } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.admin.table.v1; import "google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto"; import "google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto"; import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; option java_generic_services = true; option java_multiple_files = true; option java_outer_classname = "BigtableTableServicesProto"; option java_package = "com.google.bigtable.admin.table.v1"; // Service for creating, configuring, and deleting Cloud Bigtable tables. // Provides access to the table schemas only, not the data stored within the tables. service BigtableTableService { // Creates a new table, to be served from a specified cluster. // The table can be created with a full set of initial column families, // specified in the request. rpc CreateTable(CreateTableRequest) returns (Table) { } // Lists the names of all tables served from a specified cluster. rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { } // Gets the schema of the specified table, including its column families. rpc GetTable(GetTableRequest) returns (Table) { } // Permanently deletes a specified table and all of its data. rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { } // Changes the name of a specified table. // Cannot be used to move tables between clusters, zones, or projects. rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { } // Creates a new column family within a specified table. rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { } // Changes the configuration of a specified column family. rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { } // Permanently deletes a specified column family and all of its data. rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { } } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go ================================================ // Code generated by protoc-gen-go. 
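// Illustrative sketch, not part of the generated code: calling the table admin
// service through the generated client defined above. The dial target, dial
// options, and cluster name are hypothetical; error handling is abbreviated.
//
//	conn, err := grpc.Dial("bigtabletableadmin.googleapis.com:443", dialOpts...) // dialOpts assumed set up elsewhere
//	if err != nil { /* handle */ }
//	client := NewBigtableTableServiceClient(conn)
//	resp, err := client.ListTables(ctx, &ListTablesRequest{
//		Name: "projects/my-proj/zones/my-zone/clusters/my-cluster", // hypothetical cluster name
//	})
//	if err != nil { /* handle */ }
//	for _, t := range resp.Tables {
//		fmt.Println(t.Name)
//	}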
// source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto // DO NOT EDIT! /* Package google_bigtable_admin_table_v1 is a generated protocol buffer package. It is generated from these files: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto It has these top-level messages: CreateTableRequest ListTablesRequest ListTablesResponse GetTableRequest DeleteTableRequest RenameTableRequest CreateColumnFamilyRequest DeleteColumnFamilyRequest */ package google_bigtable_admin_table_v1 import proto "github.com/golang/protobuf/proto" import google_bigtable_admin_table_v11 "google.golang.org/cloud/bigtable/internal/table_data_proto" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal type CreateTableRequest struct { // The unique name of the cluster in which to create the new table. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The name by which the new table should be referred to within the cluster, // e.g. "foobar" rather than "/tables/foobar". TableId string `protobuf:"bytes,2,opt,name=table_id" json:"table_id,omitempty"` // The Table to create. The `name` field of the Table and all of its // ColumnFamilies must be left blank, and will be populated in the response. Table *google_bigtable_admin_table_v11.Table `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` } func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) } func (*CreateTableRequest) ProtoMessage() {} func (m *CreateTableRequest) GetTable() *google_bigtable_admin_table_v11.Table { if m != nil { return m.Table } return nil } type ListTablesRequest struct { // The unique name of the cluster for which tables should be listed. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} } func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) } func (*ListTablesRequest) ProtoMessage() {} type ListTablesResponse struct { // The tables present in the requested cluster. // At present, only the names of the tables are populated. Tables []*google_bigtable_admin_table_v11.Table `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` } func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} } func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) } func (*ListTablesResponse) ProtoMessage() {} func (m *ListTablesResponse) GetTables() []*google_bigtable_admin_table_v11.Table { if m != nil { return m.Tables } return nil } type GetTableRequest struct { // The unique name of the requested table. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *GetTableRequest) Reset() { *m = GetTableRequest{} } func (m *GetTableRequest) String() string { return proto.CompactTextString(m) } func (*GetTableRequest) ProtoMessage() {} type DeleteTableRequest struct { // The unique name of the table to be deleted. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) } func (*DeleteTableRequest) ProtoMessage() {} type RenameTableRequest struct { // The current unique name of the table. 
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The new name by which the table should be referred to within its containing // cluster, e.g. "foobar" rather than "/tables/foobar". NewId string `protobuf:"bytes,2,opt,name=new_id" json:"new_id,omitempty"` } func (m *RenameTableRequest) Reset() { *m = RenameTableRequest{} } func (m *RenameTableRequest) String() string { return proto.CompactTextString(m) } func (*RenameTableRequest) ProtoMessage() {} type CreateColumnFamilyRequest struct { // The unique name of the table in which to create the new column family. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The name by which the new column family should be referred to within the // table, e.g. "foobar" rather than "/columnFamilies/foobar". ColumnFamilyId string `protobuf:"bytes,2,opt,name=column_family_id" json:"column_family_id,omitempty"` // The column family to create. The `name` field must be left blank. ColumnFamily *google_bigtable_admin_table_v11.ColumnFamily `protobuf:"bytes,3,opt,name=column_family" json:"column_family,omitempty"` } func (m *CreateColumnFamilyRequest) Reset() { *m = CreateColumnFamilyRequest{} } func (m *CreateColumnFamilyRequest) String() string { return proto.CompactTextString(m) } func (*CreateColumnFamilyRequest) ProtoMessage() {} func (m *CreateColumnFamilyRequest) GetColumnFamily() *google_bigtable_admin_table_v11.ColumnFamily { if m != nil { return m.ColumnFamily } return nil } type DeleteColumnFamilyRequest struct { // The unique name of the column family to be deleted. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *DeleteColumnFamilyRequest) Reset() { *m = DeleteColumnFamilyRequest{} } func (m *DeleteColumnFamilyRequest) String() string { return proto.CompactTextString(m) } func (*DeleteColumnFamilyRequest) ProtoMessage() {} func init() { } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bigtable.admin.table.v1; import "google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto"; option java_multiple_files = true; option java_outer_classname = "BigtableTableServiceMessagesProto"; option java_package = "com.google.bigtable.admin.table.v1"; message CreateTableRequest { // The unique name of the cluster in which to create the new table. string name = 1; // The name by which the new table should be referred to within the cluster, // e.g. "foobar" rather than "/tables/foobar". string table_id = 2; // The Table to create. The `name` field of the Table and all of its // ColumnFamilies must be left blank, and will be populated in the response. Table table = 3; } message ListTablesRequest { // The unique name of the cluster for which tables should be listed. 
string name = 1; } message ListTablesResponse { // The tables present in the requested cluster. // At present, only the names of the tables are populated. repeated Table tables = 1; } message GetTableRequest { // The unique name of the requested table. string name = 1; } message DeleteTableRequest { // The unique name of the table to be deleted. string name = 1; } message RenameTableRequest { // The current unique name of the table. string name = 1; // The new name by which the table should be referred to within its containing // cluster, e.g. "foobar" rather than "/tables/foobar". string new_id = 2; } message CreateColumnFamilyRequest { // The unique name of the table in which to create the new column family. string name = 1; // The name by which the new column family should be referred to within the // table, e.g. "foobar" rather than "/columnFamilies/foobar". string column_family_id = 2; // The column family to create. The `name` field must be left blank. ColumnFamily column_family = 3; } message DeleteColumnFamilyRequest { // The unique name of the column family to be deleted. string name = 1; } ================================================ FILE: vendor/google.golang.org/cloud/bigtable/sample/search.go ================================================ /* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This is a sample web server that uses Cloud Bigtable as the storage layer // for a simple document-storage and full-text-search service. // It has three functions: // - Add a document. This adds the content of a user-supplied document to the // Bigtable, and adds references to the document to an index in the Bigtable. // The document is indexed under each unique word in the document. // - Search the index. This returns documents containing each word in a user // query, with snippets and links to view the whole document. // - Clear the table. This deletes and recreates the Bigtable, package main import ( "bytes" "flag" "fmt" "html/template" "io" "log" "net/http" "os" "strings" "sync" "time" "unicode" "golang.org/x/net/context" "google.golang.org/cloud/bigtable" ) var ( project = flag.String("project", "", "The name of the project.") zone = flag.String("zone", "", "The zone of the project.") cluster = flag.String("cluster", "", "The name of the Cloud Bigtable cluster.") tableName = flag.String("table", "docindex", "The name of the table containing the documents and index.") credFile = flag.String("creds", "", "File containing credentials") rebuild = flag.Bool("rebuild", false, "Rebuild the table from scratch on startup.") client *bigtable.Client adminClient *bigtable.AdminClient table *bigtable.Table addTemplate = template.Must(template.New("").Parse(` Added {{.Title}} `)) contentTemplate = template.Must(template.New("").Parse(` {{.Title}}

{{.Content}} `)) searchTemplate = template.Must(template.New("").Parse(` Results for {{.Query}}:

{{range .Results}} {{.Title}}
{{.Snippet}}

{{end}} `)) ) const ( // prototypeTableName is an existing table containing some documents. // Rebuilding a table will populate it with the data from this table. prototypeTableName = "shakespearetemplate" indexColumnFamily = "i" contentColumnFamily = "c" mainPage = ` Document Search Search for documents:
Add a document:
Document name:
Document text:
Rebuild table:
` ) func main() { flag.Parse() if *tableName == prototypeTableName { log.Fatal("Can't use " + prototypeTableName + " as your table.") } // Let the library get credentials from file. os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", *credFile) // Make an admin client. var err error if adminClient, err = bigtable.NewAdminClient(context.Background(), *project, *zone, *cluster); err != nil { log.Fatal("Bigtable NewAdminClient:", err) } // Make a regular client. client, err = bigtable.NewClient(context.Background(), *project, *zone, *cluster) if err != nil { log.Fatal("Bigtable NewClient:", err) } // Open the table. table = client.Open(*tableName) // Rebuild the table if the command-line flag is set. if *rebuild { if err := rebuildTable(); err != nil { log.Fatal(err) } } // Set up HTML handlers, and start the web server. http.HandleFunc("/search", handleSearch) http.HandleFunc("/content", handleContent) http.HandleFunc("/add", handleAddDoc) http.HandleFunc("/clearindex", handleClear) http.HandleFunc("/", handleMain) log.Fatal(http.ListenAndServe(":8080", nil)) } // handleMain outputs the home page, containing a search box, an "add document" box, and "clear table" button. func handleMain(w http.ResponseWriter, r *http.Request) { io.WriteString(w, mainPage) } // tokenize splits a string into tokens. // This is very simple, it's not a good tokenization function. func tokenize(s string) []string { wordMap := make(map[string]bool) f := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) }) for _, word := range f { word = strings.ToLower(word) wordMap[word] = true } words := make([]string, 0, len(wordMap)) for word := range wordMap { words = append(words, word) } return words } // handleContent fetches the content of a document from the Bigtable and returns it. func handleContent(w http.ResponseWriter, r *http.Request) { ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) name := r.FormValue("name") if len(name) == 0 { http.Error(w, "No document name supplied.", http.StatusBadRequest) return } row, err := table.ReadRow(ctx, name) if err != nil { http.Error(w, "Error reading content: "+err.Error(), http.StatusInternalServerError) return } content := row[contentColumnFamily] if len(content) == 0 { http.Error(w, "Document not found.", http.StatusNotFound) return } var buf bytes.Buffer if err := contentTemplate.ExecuteTemplate(&buf, "", struct{ Title, Content string }{name, string(content[0].Value)}); err != nil { http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) return } io.Copy(w, &buf) } // handleSearch responds to search queries, returning links and snippets for matching documents. func handleSearch(w http.ResponseWriter, r *http.Request) { ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) query := r.FormValue("q") // Split the query into words. words := tokenize(query) if len(words) == 0 { http.Error(w, "Empty query.", http.StatusBadRequest) return } // readRows reads from many rows concurrently. readRows := func(rows []string) ([]bigtable.Row, error) { results := make([]bigtable.Row, len(rows)) errors := make([]error, len(rows)) var wg sync.WaitGroup for i, row := range rows { wg.Add(1) go func(i int, row string) { defer wg.Done() results[i], errors[i] = table.ReadRow(ctx, row) }(i, row) } wg.Wait() for _, err := range errors { if err != nil { return nil, err } } return results, nil } // For each query word, get the list of documents containing it. 
results, err := readRows(words) if err != nil { http.Error(w, "Error reading index: "+err.Error(), http.StatusInternalServerError) return } // Count how many of the query words each result contained. hits := make(map[string]int) for _, r := range results { for _, r := range r[indexColumnFamily] { hits[r.Column]++ } } // Build a slice of all the documents that matched every query word. var matches []string for doc, count := range hits { if count == len(words) { matches = append(matches, doc[len(indexColumnFamily+":"):]) } } // Fetch the content of those documents from the Bigtable. content, err := readRows(matches) if err != nil { http.Error(w, "Error reading results: "+err.Error(), http.StatusInternalServerError) return } type result struct{ Title, Snippet string } data := struct { Query string Results []result }{query, nil} // Output links and snippets. for i, doc := range matches { var text string c := content[i][contentColumnFamily] if len(c) > 0 { text = string(c[0].Value) } if len(text) > 100 { text = text[:100] + "..." } data.Results = append(data.Results, result{doc, text}) } var buf bytes.Buffer if err := searchTemplate.ExecuteTemplate(&buf, "", data); err != nil { http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) return } io.Copy(w, &buf) } // handleAddDoc adds a document to the index. func handleAddDoc(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { http.Error(w, "POST requests only", http.StatusMethodNotAllowed) return } ctx, _ := context.WithTimeout(context.Background(), time.Minute) name := r.FormValue("name") if len(name) == 0 { http.Error(w, "Empty document name!", http.StatusBadRequest) return } content := r.FormValue("content") if len(content) == 0 { http.Error(w, "Empty document content!", http.StatusBadRequest) return } var ( writeErr error // Set if any write fails. mu sync.Mutex // Protects writeErr wg sync.WaitGroup // Used to wait for all writes to finish. ) // writeOneColumn writes one column in one row, updates err if there is an error, // and signals wg that one operation has finished. writeOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) { mut := bigtable.NewMutation() mut.Set(family, column, ts, []byte(value)) err := table.Apply(ctx, row, mut) if err != nil { mu.Lock() writeErr = err mu.Unlock() } } // Start a write to store the document content. wg.Add(1) go func() { writeOneColumn(name, contentColumnFamily, "", content, bigtable.Now()) wg.Done() }() // Start writes to store the document name in the index for each word in the document. words := tokenize(content) for _, word := range words { var ( row = word family = indexColumnFamily column = name value = "" ts = bigtable.Now() ) wg.Add(1) go func() { // TODO: should use a semaphore to limit the number of concurrent writes. writeOneColumn(row, family, column, value, ts) wg.Done() }() } wg.Wait() if writeErr != nil { http.Error(w, "Error writing to Bigtable: "+writeErr.Error(), http.StatusInternalServerError) return } var buf bytes.Buffer if err := addTemplate.ExecuteTemplate(&buf, "", struct{ Title string }{name}); err != nil { http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) return } io.Copy(w, &buf) } // rebuildTable deletes the table if it exists, then creates the table, with the index column family. 
func rebuildTable() error { ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) adminClient.DeleteTable(ctx, *tableName) if err := adminClient.CreateTable(ctx, *tableName); err != nil { return fmt.Errorf("CreateTable: %v", err) } time.Sleep(20 * time.Second) if err := adminClient.CreateColumnFamily(ctx, *tableName, indexColumnFamily); err != nil { return fmt.Errorf("CreateColumnFamily: %v", err) } if err := adminClient.CreateColumnFamily(ctx, *tableName, contentColumnFamily); err != nil { return fmt.Errorf("CreateColumnFamily: %v", err) } // Open the prototype table. It contains a number of documents to get started with. prototypeTable := client.Open(prototypeTableName) var ( writeErr error // Set if any write fails. mu sync.Mutex // Protects writeErr wg sync.WaitGroup // Used to wait for all writes to finish. ) copyRowToTable := func(row bigtable.Row) bool { mu.Lock() failed := writeErr != nil mu.Unlock() if failed { return false } mut := bigtable.NewMutation() for family, items := range row { for _, item := range items { // Get the column name, excluding the column family name and ':' character. columnWithoutFamily := item.Column[len(family)+1:] mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value) } } wg.Add(1) go func() { // TODO: should use a semaphore to limit the number of concurrent writes. if err := table.Apply(ctx, row.Key(), mut); err != nil { mu.Lock() writeErr = err mu.Unlock() } wg.Done() }() return true } // Create a filter that only accepts the column families we're interested in. filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily) // Read every row from prototypeTable, and call copyRowToTable to copy it to our table. err := prototypeTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter)) wg.Wait() if err != nil { return err } return writeErr } // handleClear calls rebuildTable func handleClear(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { http.Error(w, "POST requests only", http.StatusMethodNotAllowed) return } if err := rebuildTable(); err != nil { http.Error(w, "Failed to rebuild index: "+err.Error(), http.StatusInternalServerError) return } fmt.Fprint(w, "Rebuilt index.\n") } ================================================ FILE: vendor/google.golang.org/cloud/cloud.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package cloud contains Google Cloud Platform APIs related types // and common functions. package cloud import ( "net/http" "golang.org/x/net/context" "google.golang.org/cloud/internal" ) // NewContext returns a new context that uses the provided http.Client. // Provided http.Client is responsible to authorize and authenticate // the requests made to the Google Cloud APIs. // It mutates the client's original Transport to append the cloud // package's user-agent to the outgoing requests. 
// You can obtain the project ID from the Google Developers Console, // https://console.developers.google.com. func NewContext(projID string, c *http.Client) context.Context { if c == nil { panic("invalid nil *http.Client passed to NewContext") } return WithContext(context.Background(), projID, c) } // WithContext returns a new context in a similar way NewContext does, // but initiates the new context with the specified parent. func WithContext(parent context.Context, projID string, c *http.Client) context.Context { // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. // Do User-Agent some other way. if _, ok := c.Transport.(*internal.Transport); !ok { c.Transport = &internal.Transport{Base: c.Transport} } return internal.WithContext(parent, projID, c) } ================================================ FILE: vendor/google.golang.org/cloud/compute/metadata/metadata.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package metadata provides access to Google Compute Engine (GCE) // metadata and API service accounts. // // This package is a wrapper around the GCE metadata service, // as documented at https://developers.google.com/compute/docs/metadata. package metadata import ( "encoding/json" "fmt" "io/ioutil" "net" "net/http" "os" "strings" "sync" "time" "google.golang.org/cloud/internal" ) type cachedValue struct { k string trim bool mu sync.Mutex v string } var ( projID = &cachedValue{k: "project/project-id", trim: true} projNum = &cachedValue{k: "project/numeric-project-id", trim: true} instID = &cachedValue{k: "instance/id", trim: true} ) var metaClient = &http.Client{ Transport: &internal.Transport{ Base: &http.Transport{ Dial: (&net.Dialer{ Timeout: 750 * time.Millisecond, KeepAlive: 30 * time.Second, }).Dial, ResponseHeaderTimeout: 750 * time.Millisecond, }, }, } // NotDefinedError is returned when requested metadata is not defined. // // The underlying string is the suffix after "/computeMetadata/v1/". // // This error is not returned if the value is defined to be the empty // string. type NotDefinedError string func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } // Get returns a value from the metadata service. // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". // // If the GCE_METADATA_HOST environment variable is not defined, a default of // 169.254.169.254 will be used instead. // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. func Get(suffix string) (string, error) { // Using a fixed IP makes it very difficult to spoof the metadata service in // a container, which is an important use-case for local testing of cloud // deployments. To enable spoofing of the metadata service, the environment // variable GCE_METADATA_HOST is first inspected to decide where metadata // requests shall go. 
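// Illustrative usage sketch, not part of this file: callers typically guard
// metadata lookups with OnGCE (defined below) and then read values such as the
// project ID or a named instance attribute. The attribute name and the
// caller-side handling shown here are assumptions, not behavior this package
// mandates.
//
//	if metadata.OnGCE() {
//		proj, err := metadata.ProjectID()
//		if err != nil { /* handle */ }
//		v, err := metadata.InstanceAttributeValue("my-attr") // "my-attr" is hypothetical
//		if _, ok := err.(metadata.NotDefinedError); ok {
//			v = "" // attribute not set on this instance
//		}
//		_ = proj
//		_ = v
//	}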
host := os.Getenv("GCE_METADATA_HOST") if host == "" { // Using 169.254.169.254 instead of "metadata" here because Go // binaries built with the "netgo" tag and without cgo won't // know the search suffix for "metadata" is // ".google.internal", and this IP address is documented as // being stable anyway. host = "169.254.169.254" } url := "http://" + host + "/computeMetadata/v1/" + suffix req, _ := http.NewRequest("GET", url, nil) req.Header.Set("Metadata-Flavor", "Google") res, err := metaClient.Do(req) if err != nil { return "", err } defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return "", NotDefinedError(suffix) } if res.StatusCode != 200 { return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) } all, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } return string(all), nil } func getTrimmed(suffix string) (s string, err error) { s, err = Get(suffix) s = strings.TrimSpace(s) return } func (c *cachedValue) get() (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { v, err = getTrimmed(c.k) } else { v, err = Get(c.k) } if err == nil { c.v = v } return } var onGCE struct { sync.Mutex set bool v bool } // OnGCE reports whether this process is running on Google Compute Engine. func OnGCE() bool { defer onGCE.Unlock() onGCE.Lock() if onGCE.set { return onGCE.v } onGCE.set = true // We use the DNS name of the metadata service here instead of the IP address // because we expect that to fail faster in the not-on-GCE case. res, err := metaClient.Get("http://metadata.google.internal") if err != nil { return false } onGCE.v = res.Header.Get("Metadata-Flavor") == "Google" return onGCE.v } // ProjectID returns the current instance's project ID string. func ProjectID() (string, error) { return projID.get() } // NumericProjectID returns the current instance's numeric project ID. func NumericProjectID() (string, error) { return projNum.get() } // InternalIP returns the instance's primary internal IP address. func InternalIP() (string, error) { return getTrimmed("instance/network-interfaces/0/ip") } // ExternalIP returns the instance's primary external (public) IP address. func ExternalIP() (string, error) { return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". func Hostname() (string, error) { return getTrimmed("instance/hostname") } // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. func InstanceTags() ([]string, error) { var s []string j, err := Get("instance/tags") if err != nil { return nil, err } if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { return nil, err } return s, nil } // InstanceID returns the current VM's numeric instance ID. func InstanceID() (string, error) { return instID.get() } // InstanceName returns the current VM's instance ID string. func InstanceName() (string, error) { host, err := Hostname() if err != nil { return "", err } return strings.Split(host, ".")[0], nil } // Zone returns the current VM's zone, such as "us-central1-b". func Zone() (string, error) { zone, err := getTrimmed("instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err } return zone[strings.LastIndex(zone, "/")+1:], nil } // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. 
The value of an // attribute can be obtained with InstanceAttributeValue. func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } func lines(suffix string) ([]string, error) { j, err := Get(suffix) if err != nil { return nil, err } s := strings.Split(strings.TrimSpace(j), "\n") for i := range s { s[i] = strings.TrimSpace(s[i]) } return s, nil } // InstanceAttributeValue returns the value of the provided VM // instance attribute. // // If the requested attribute is not defined, the returned error will // be of type NotDefinedError. // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. func InstanceAttributeValue(attr string) (string, error) { return Get("instance/attributes/" + attr) } // ProjectAttributeValue returns the value of the provided // project attribute. // // If the requested attribute is not defined, the returned error will // be of type NotDefinedError. // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. func ProjectAttributeValue(attr string) (string, error) { return Get("project/attributes/" + attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. func Scopes(serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } return lines("instance/service-accounts/" + serviceAccount + "/scopes") } ================================================ FILE: vendor/google.golang.org/cloud/container/container.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package container contains a Google Container Engine client. // // For more information about the API, // see https://cloud.google.com/container-engine/docs package container import ( "errors" "net/http" "time" "golang.org/x/net/context" raw "google.golang.org/api/container/v1beta1" "google.golang.org/cloud/internal" ) type Type string var ( TypeCreate Type = Type("createCluster") TypeDelete Type = Type("deleteCluster") ) type Status string var ( Done = Status("done") Pending = Status("pending") Running = Status("running") Error = Status("error") Provisioning = Status("provisioning") Stopping = Status("stopping") ) // Resource is a Google Container Engine cluster resource. type Resource struct { // Name is the name of this cluster. The name must be unique // within this project and zone, and can be up to 40 characters. Name string // Description is the description of the cluster. Optional. Description string // Zone is the Google Compute Engine zone in which the cluster resides. 
Zone string // Status is the current status of the cluster. It could either be // StatusError, StatusProvisioning, StatusRunning or StatusStopping. Status Status // Num is the number of the nodes in this cluster resource. Num int64 // APIVersion is the version of the Kubernetes master and kubelets running // in this cluster. Allowed value is 0.4.2, or leave blank to // pick up the latest stable release. APIVersion string // Endpoint is the IP address of this cluster's Kubernetes master. // The endpoint can be accessed at https://username:password@endpoint/. // See Username and Password fields for the username and password information. Endpoint string // Username is the username to use when accessing the Kubernetes master endpoint. Username string // Password is the password to use when accessing the Kubernetes master endpoint. Password string // ContainerIPv4CIDR is the IP addresses of the container pods in // this cluster, in CIDR notation (e.g. 1.2.3.4/29). ContainerIPv4CIDR string // ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this // cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are // always in the 10.0.0.0/16 range. ServicesIPv4CIDR string // MachineType is a Google Compute Engine machine type (e.g. n1-standard-1). // If none set, the default type is used while creating a new cluster. MachineType string // SourceImage is the fully-specified name of a Google Compute Engine image. // For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD. SourceImage string // Created is the creation time of this cluster. Created time.Time } func resourceFromRaw(c *raw.Cluster) *Resource { if c == nil { return nil } r := &Resource{ Name: c.Name, Description: c.Description, Zone: c.Zone, Status: Status(c.Status), Num: c.NumNodes, APIVersion: c.ClusterApiVersion, Endpoint: c.Endpoint, Username: c.MasterAuth.User, Password: c.MasterAuth.Password, ContainerIPv4CIDR: c.ContainerIpv4Cidr, ServicesIPv4CIDR: c.ServicesIpv4Cidr, MachineType: c.NodeConfig.MachineType, SourceImage: c.NodeConfig.SourceImage, } r.Created, _ = time.Parse(time.RFC3339, c.CreationTimestamp) return r } func resourcesFromRaw(c []*raw.Cluster) []*Resource { r := make([]*Resource, len(c)) for i, val := range c { r[i] = resourceFromRaw(val) } return r } // Op represents a Google Container Engine API operation. type Op struct { // Name is the name of the operation. Name string // Zone is the Google Compute Engine zone. Zone string // TargetURL is the URL of the cluster resource // that this operation is associated with. TargetURL string // Type is the operation type. It could be either be TypeCreate or TypeDelete. Type Type // Status is the current status of this operation. It could be either // OpDone or OpPending. Status Status } func opFromRaw(o *raw.Operation) *Op { if o == nil { return nil } return &Op{ Name: o.Name, Zone: o.Zone, TargetURL: o.Target, Type: Type(o.OperationType), Status: Status(o.Status), } } func opsFromRaw(o []*raw.Operation) []*Op { ops := make([]*Op, len(o)) for i, val := range o { ops[i] = opFromRaw(val) } return ops } // Clusters returns a list of cluster resources from the specified zone. // If no zone is specified, it returns all clusters under the user project. 
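// Illustrative usage sketch, not part of this file: listing clusters for a
// project. cloud.NewContext (see cloud.go above) needs an http.Client that is
// already authorized for the Container Engine API; the client variable,
// project ID, and zone shown here are hypothetical.
//
//	ctx := cloud.NewContext("my-project", authorizedHTTPClient)
//	clusters, err := container.Clusters(ctx, "us-central1-a")
//	if err != nil { /* handle */ }
//	for _, c := range clusters {
//		fmt.Println(c.Name, c.Status, c.Endpoint)
//	}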
func Clusters(ctx context.Context, zone string) ([]*Resource, error) { s := rawService(ctx) if zone == "" { resp, err := s.Projects.Clusters.List(internal.ProjID(ctx)).Do() if err != nil { return nil, err } return resourcesFromRaw(resp.Clusters), nil } resp, err := s.Projects.Zones.Clusters.List(internal.ProjID(ctx), zone).Do() if err != nil { return nil, err } return resourcesFromRaw(resp.Clusters), nil } // Cluster returns metadata about the specified cluster. func Cluster(ctx context.Context, zone, name string) (*Resource, error) { s := rawService(ctx) resp, err := s.Projects.Zones.Clusters.Get(internal.ProjID(ctx), zone, name).Do() if err != nil { return nil, err } return resourceFromRaw(resp), nil } // CreateCluster creates a new cluster with the provided metadata // in the specified zone. func CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) { panic("not implemented") } // DeleteCluster deletes a cluster. func DeleteCluster(ctx context.Context, zone, name string) error { s := rawService(ctx) _, err := s.Projects.Zones.Clusters.Delete(internal.ProjID(ctx), zone, name).Do() return err } // Operations returns a list of operations from the specified zone. // If no zone is specified, it looks up for all of the operations // that are running under the user's project. func Operations(ctx context.Context, zone string) ([]*Op, error) { s := rawService(ctx) if zone == "" { resp, err := s.Projects.Operations.List(internal.ProjID(ctx)).Do() if err != nil { return nil, err } return opsFromRaw(resp.Operations), nil } resp, err := s.Projects.Zones.Operations.List(internal.ProjID(ctx), zone).Do() if err != nil { return nil, err } return opsFromRaw(resp.Operations), nil } // Operation returns an operation. func Operation(ctx context.Context, zone, name string) (*Op, error) { s := rawService(ctx) resp, err := s.Projects.Zones.Operations.Get(internal.ProjID(ctx), zone, name).Do() if err != nil { return nil, err } if resp.ErrorMessage != "" { return nil, errors.New(resp.ErrorMessage) } return opFromRaw(resp), nil } func rawService(ctx context.Context) *raw.Service { return internal.Service(ctx, "container", func(hc *http.Client) interface{} { svc, _ := raw.New(hc) return svc }).(*raw.Service) } ================================================ FILE: vendor/google.golang.org/cloud/datastore/datastore.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package datastore contains a Google Cloud Datastore client. // // This package is experimental and may make backwards-incompatible changes. 
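// Illustrative usage sketch, not part of this file: a minimal Get call against
// this package. The NewKey helper is assumed to be defined elsewhere in the
// package (its exact signature is not shown in this file), and the entity kind,
// key name, and client variable are hypothetical.
//
//	type Article struct {
//		Title string
//		Body  string
//	}
//	ctx := cloud.NewContext("my-project", authorizedHTTPClient)
//	key := datastore.NewKey(ctx, "Article", "intro-post", 0, nil) // assumed signature
//	var a Article
//	if err := datastore.Get(ctx, key, &a); err == datastore.ErrNoSuchEntity {
//		// no entity is stored under this key
//	}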
package datastore import ( "bytes" "errors" "fmt" "io/ioutil" "net/http" "reflect" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/cloud/internal" pb "google.golang.org/cloud/internal/datastore" ) // ContextKey represents a context key specific to the datastore type ContextKey string const ( // ScopeDatastore grants permissions to view and/or manage datastore entities ScopeDatastore = "https://www.googleapis.com/auth/datastore" // ScopeUserEmail grants permission to view the user's email address. // It is required to access the datastore ScopeUserEmail = "https://www.googleapis.com/auth/userinfo.email" ) var ( // ErrInvalidEntityType is returned when functions like Get or Next are // passed a dst or src argument of invalid type. ErrInvalidEntityType = errors.New("datastore: invalid entity type") // ErrInvalidKey is returned when an invalid key is presented. ErrInvalidKey = errors.New("datastore: invalid key") // ErrNoSuchEntity is returned when no entity was found for a given key. ErrNoSuchEntity = errors.New("datastore: no such entity") ) type multiArgType int const ( multiArgTypeInvalid multiArgType = iota multiArgTypePropertyLoadSaver multiArgTypeStruct multiArgTypeStructPtr multiArgTypeInterface ) // nsKey is the type of the context.Context key to store the datastore // namespace. type nsKey struct{} // WithNamespace returns a new context that limits the scope its parent // context with a Datastore namespace. func WithNamespace(parent context.Context, namespace string) context.Context { return context.WithValue(parent, nsKey{}, namespace) } // ctxNamespace returns the active namespace for a context. // It defaults to "" if no namespace was specified. func ctxNamespace(ctx context.Context) string { v, _ := ctx.Value(nsKey{}).(string) return v } // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. // StructType is the type of the struct pointed to by the destination argument // passed to Get or to Iterator.Next. type ErrFieldMismatch struct { StructType reflect.Type FieldName string Reason string } // errHTTP is returned when responds is a non-200 HTTP response. 
type errHTTP struct { StatusCode int Body string err error } func (e *errHTTP) Error() string { if e.err == nil { return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body) } return e.err.Error() } func (e *ErrFieldMismatch) Error() string { return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", e.FieldName, e.StructType, e.Reason) } // baseUrl gets the base url active for the datastore service // defaults to "https://www.googleapis.com/datastore/v1beta2/datasets/" if none was specified func baseUrl(ctx context.Context) string { v := ctx.Value(ContextKey("base_url")) if v == nil { return "https://www.googleapis.com/datastore/v1beta2/datasets/" } else { return v.(string) } } func call(ctx context.Context, method string, req proto.Message, resp proto.Message) error { payload, err := proto.Marshal(req) if err != nil { return err } url := baseUrl(ctx) + internal.ProjID(ctx) + "/" + method r, err := internal.HTTPClient(ctx).Post(url, "application/x-protobuf", bytes.NewReader(payload)) if err != nil { return err } defer r.Body.Close() all, err := ioutil.ReadAll(r.Body) if r.StatusCode != http.StatusOK { e := &errHTTP{ StatusCode: r.StatusCode, err: err, } if err == nil { e.Body = string(all) } return e } if err != nil { return err } if err = proto.Unmarshal(all, resp); err != nil { return err } return nil } func keyToProto(k *Key) *pb.Key { if k == nil { return nil } // TODO(jbd): Eliminate unrequired allocations. path := []*pb.Key_PathElement(nil) for { el := &pb.Key_PathElement{ Kind: proto.String(k.kind), } if k.id != 0 { el.Id = proto.Int64(k.id) } if k.name != "" { el.Name = proto.String(k.name) } path = append([]*pb.Key_PathElement{el}, path...) if k.parent == nil { break } k = k.parent } key := &pb.Key{ PathElement: path, } if k.namespace != "" { key.PartitionId = &pb.PartitionId{ Namespace: proto.String(k.namespace), } } return key } func protoToKey(p *pb.Key) *Key { keys := make([]*Key, len(p.GetPathElement())) for i, el := range p.GetPathElement() { keys[i] = &Key{ namespace: p.GetPartitionId().GetNamespace(), kind: el.GetKind(), id: el.GetId(), name: el.GetName(), } } for i := 0; i < len(keys)-1; i++ { keys[i+1].parent = keys[i] } return keys[len(keys)-1] } // multiKeyToProto is a batch version of keyToProto. func multiKeyToProto(keys []*Key) []*pb.Key { ret := make([]*pb.Key, len(keys)) for i, k := range keys { ret[i] = keyToProto(k) } return ret } // multiKeyToProto is a batch version of keyToProto. func multiProtoToKey(keys []*pb.Key) []*Key { ret := make([]*Key, len(keys)) for i, k := range keys { ret[i] = protoToKey(k) } return ret } // multiValid is a batch version of Key.valid. It returns an error, not a // []bool. func multiValid(key []*Key) error { invalid := false for _, k := range key { if !k.valid() { invalid = true break } } if !invalid { return nil } err := make(MultiError, len(key)) for i, k := range key { if !k.valid() { err[i] = ErrInvalidKey } } return err } // checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct // type S, for some interface type I, or some non-interface non-pointer type P // such that P or *P implements PropertyLoadSaver. // // It returns what category the slice's elements are, and the reflect.Type // that represents S, I or P. // // As a special case, PropertyList is an invalid type for v. 
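// Example (illustrative sketch; Task and keys are hypothetical caller-side
// names): loading a batch with GetMulti and inspecting the per-element errors
// reported through MultiError:
//
//	tasks := make([]Task, len(keys)) // []S; []*S, []I and []P are also accepted, as described above
//	if err := datastore.GetMulti(ctx, keys, tasks); err != nil {
//		if me, ok := err.(datastore.MultiError); ok {
//			for i, e := range me {
//				if e == datastore.ErrNoSuchEntity {
//					// keys[i] has no stored entity; tasks[i] is left as its zero value
//				}
//			}
//		} else {
//			log.Fatal(err)
//		}
//	}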
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { if v.Kind() != reflect.Slice { return multiArgTypeInvalid, nil } if v.Type() == typeOfPropertyList { return multiArgTypeInvalid, nil } elemType = v.Type().Elem() if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { return multiArgTypePropertyLoadSaver, elemType } switch elemType.Kind() { case reflect.Struct: return multiArgTypeStruct, elemType case reflect.Interface: return multiArgTypeInterface, elemType case reflect.Ptr: elemType = elemType.Elem() if elemType.Kind() == reflect.Struct { return multiArgTypeStructPtr, elemType } } return multiArgTypeInvalid, nil } // Get loads the entity stored for key into dst, which must be a struct pointer // or implement PropertyLoadSaver. If there is no such entity for the key, Get // returns ErrNoSuchEntity. // // The values of dst's unmatched struct fields are not modified, and matching // slice-typed fields are not reset before appending to them. In particular, it // is recommended to pass a pointer to a zero valued struct on each Get call. // // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. ErrFieldMismatch is only returned if // dst is a struct pointer. func Get(ctx context.Context, key *Key, dst interface{}) error { err := get(ctx, []*Key{key}, []interface{}{dst}, nil) if me, ok := err.(MultiError); ok { return me[0] } return err } // GetMulti is a batch version of Get. // // dst must be a []S, []*S, []I or []P, for some struct type S, some interface // type I, or some non-interface non-pointer type P such that P or *P // implements PropertyLoadSaver. If an []I, each element must be a valid dst // for Get: it must be a struct pointer or implement PropertyLoadSaver. // // As a special case, PropertyList is an invalid type for dst, even though a // PropertyList is a slice of structs. It is treated as invalid to avoid being // mistakenly passed when []PropertyList was intended. func GetMulti(ctx context.Context, keys []*Key, dst interface{}) error { return get(ctx, keys, dst, nil) } func get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error { v := reflect.ValueOf(dst) multiArgType, _ := checkMultiArg(v) // Sanity checks if multiArgType == multiArgTypeInvalid { return errors.New("datastore: dst has invalid type") } if len(keys) != v.Len() { return errors.New("datastore: keys and dst slices have different length") } if len(keys) == 0 { return nil } // Go through keys, validate them, serialize then, and create a dict mapping them to their index multiErr, any := make(MultiError, len(keys)), false keyMap := make(map[string]int) pbKeys := make([]*pb.Key, len(keys)) for i, k := range keys { if !k.valid() { multiErr[i] = ErrInvalidKey any = true } else { keyMap[k.String()] = i pbKeys[i] = keyToProto(k) } } if any { return multiErr } req := &pb.LookupRequest{ Key: pbKeys, ReadOptions: opts, } resp := &pb.LookupResponse{} if err := call(ctx, "lookup", req, resp); err != nil { return err } if len(resp.Deferred) > 0 { // TODO(jbd): Assess whether we should retry the deferred keys. 
return errors.New("datastore: some entities temporarily unavailable") } if len(keys) != len(resp.Found)+len(resp.Missing) { return errors.New("datastore: internal error: server returned the wrong number of entities") } for _, e := range resp.Found { k := protoToKey(e.Entity.Key) index := keyMap[k.String()] elem := v.Index(index) if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem = elem.Addr() } err := loadEntity(elem.Interface(), e.Entity) if err != nil { multiErr[index] = err any = true } } for _, e := range resp.Missing { k := protoToKey(e.Entity.Key) multiErr[keyMap[k.String()]] = ErrNoSuchEntity any = true } if any { return multiErr } return nil } // Put saves the entity src into the datastore with key k. src must be a struct // pointer or implement PropertyLoadSaver; if a struct pointer then any // unexported fields of that struct will be skipped. If k is an incomplete key, // the returned key will be a unique key generated by the datastore. func Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { k, err := PutMulti(ctx, []*Key{key}, []interface{}{src}) if err != nil { if me, ok := err.(MultiError); ok { return nil, me[0] } return nil, err } return k[0], nil } // PutMulti is a batch version of Put. // // src must satisfy the same conditions as the dst argument to GetMulti. func PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) { mutation, err := putMutation(keys, src) if err != nil { return nil, err } // Make the request. req := &pb.CommitRequest{ Mutation: mutation, Mode: pb.CommitRequest_NON_TRANSACTIONAL.Enum(), } resp := &pb.CommitResponse{} if err := call(ctx, "commit", req, resp); err != nil { return nil, err } // Copy any newly minted keys into the returned keys. newKeys := make(map[int]int) // Map of index in returned slice to index in response. ret := make([]*Key, len(keys)) var idx int for i, key := range keys { if key.Incomplete() { // This key will be in the mutation result. newKeys[i] = idx idx++ } else { ret[i] = key } } if len(newKeys) != len(resp.MutationResult.InsertAutoIdKey) { return nil, errors.New("datastore: internal error: server returned the wrong number of keys") } for retI, respI := range newKeys { ret[retI] = protoToKey(resp.MutationResult.InsertAutoIdKey[respI]) } return ret, nil } func putMutation(keys []*Key, src interface{}) (*pb.Mutation, error) { v := reflect.ValueOf(src) multiArgType, _ := checkMultiArg(v) if multiArgType == multiArgTypeInvalid { return nil, errors.New("datastore: src has invalid type") } if len(keys) != v.Len() { return nil, errors.New("datastore: key and src slices have different length") } if len(keys) == 0 { return nil, nil } if err := multiValid(keys); err != nil { return nil, err } var upsert, insert []*pb.Entity for i, k := range keys { val := reflect.ValueOf(src).Index(i) // If src is an interface slice []interface{}{ent1, ent2} if val.Kind() == reflect.Interface && val.Elem().Kind() == reflect.Slice { val = val.Elem() } // If src is a slice of ptrs []*T{ent1, ent2} if val.Kind() == reflect.Ptr && val.Elem().Kind() == reflect.Slice { val = val.Elem() } p, err := saveEntity(k, val.Interface()) if err != nil { return nil, fmt.Errorf("datastore: Error while saving %v: %v", k.String(), err) } if k.Incomplete() { insert = append(insert, p) } else { upsert = append(upsert, p) } } return &pb.Mutation{ InsertAutoId: insert, Upsert: upsert, }, nil } // Delete deletes the entity for the given key. 
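// Example (illustrative sketch; Task is a hypothetical caller-side type):
// putting an entity under an incomplete key, which Put completes, and removing
// it again with Delete below:
//
//	k := datastore.NewIncompleteKey(ctx, "Task", nil)
//	k, err := datastore.Put(ctx, k, &Task{Title: "buy milk"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	// ... later ...
//	if err := datastore.Delete(ctx, k); err != nil {
//		log.Fatal(err)
//	}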
func Delete(ctx context.Context, key *Key) error { err := DeleteMulti(ctx, []*Key{key}) if me, ok := err.(MultiError); ok { return me[0] } return err } // DeleteMulti is a batch version of Delete. func DeleteMulti(ctx context.Context, keys []*Key) error { mutation, err := deleteMutation(keys) if err != nil { return err } req := &pb.CommitRequest{ Mutation: mutation, Mode: pb.CommitRequest_NON_TRANSACTIONAL.Enum(), } resp := &pb.CommitResponse{} return call(ctx, "commit", req, resp) } func deleteMutation(keys []*Key) (*pb.Mutation, error) { protoKeys := make([]*pb.Key, len(keys)) for i, k := range keys { if k.Incomplete() { return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) } protoKeys[i] = keyToProto(k) } return &pb.Mutation{ Delete: protoKeys, }, nil } ================================================ FILE: vendor/google.golang.org/cloud/datastore/errors.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file provides error functions for common API failure modes. package datastore import ( "fmt" ) // MultiError is returned by batch operations when there are errors with // particular elements. Errors will be in a one-to-one correspondence with // the input elements; successful elements will have a nil entry. type MultiError []error func (m MultiError) Error() string { s, n := "", 0 for _, e := range m { if e != nil { if n == 0 { s = e.Error() } n++ } } switch n { case 0: return "(0 errors)" case 1: return s case 2: return s + " (and 1 other error)" } return fmt.Sprintf("%s (and %d other errors)", s, n-1) } ================================================ FILE: vendor/google.golang.org/cloud/datastore/key.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "bytes" "encoding/base64" "encoding/gob" "errors" "strconv" "strings" "github.com/golang/protobuf/proto" "golang.org/x/net/context" pb "google.golang.org/cloud/internal/datastore" ) // Key represents the datastore key for a stored entity, and is immutable. 
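// Example (illustrative sketch; the kinds and names are hypothetical):
// constructing complete, incomplete and parented keys:
//
//	account := datastore.NewKey(ctx, "Account", "alice", 0, nil) // named key
//	task := datastore.NewKey(ctx, "Task", "", 42, account)       // numeric ID under a parent
//	pending := datastore.NewIncompleteKey(ctx, "Task", account)  // ID assigned by the datastore on Put
//	_ = pending.Incomplete()                                     // true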
type Key struct { kind string id int64 name string parent *Key namespace string } func (k *Key) Kind() string { return k.kind } func (k *Key) ID() int64 { return k.id } func (k *Key) Name() string { return k.name } func (k *Key) Parent() *Key { return k.parent } func (k *Key) SetParent(v *Key) { if v.Incomplete() { panic("can't set an incomplete key as parent") } k.parent = v } func (k *Key) Namespace() string { return k.namespace } // Complete returns whether the key does not refer to a stored entity. func (k *Key) Incomplete() bool { return k.name == "" && k.id == 0 } // valid returns whether the key is valid. func (k *Key) valid() bool { if k == nil { return false } for ; k != nil; k = k.parent { if k.kind == "" { return false } if k.name != "" && k.id != 0 { return false } if k.parent != nil { if k.parent.Incomplete() { return false } if k.parent.namespace != k.namespace { return false } } } return true } func (k *Key) Equal(o *Key) bool { for { if k == nil || o == nil { return k == o // if either is nil, both must be nil } if k.namespace != o.namespace || k.name != o.name || k.id != o.id || k.kind != o.kind { return false } if k.parent == nil && o.parent == nil { return true } k = k.parent o = o.parent } } // marshal marshals the key's string representation to the buffer. func (k *Key) marshal(b *bytes.Buffer) { if k.parent != nil { k.parent.marshal(b) } b.WriteByte('/') b.WriteString(k.kind) b.WriteByte(',') if k.name != "" { b.WriteString(k.name) } else { b.WriteString(strconv.FormatInt(k.id, 10)) } } // String returns a string representation of the key. func (k *Key) String() string { if k == nil { return "" } b := bytes.NewBuffer(make([]byte, 0, 512)) k.marshal(b) return b.String() } // Note: Fields not renamed compared to appengine gobKey struct // This ensures gobs created by appengine can be read here, and vice/versa type gobKey struct { Kind string StringID string IntID int64 Parent *gobKey AppID string Namespace string } func keyToGobKey(k *Key) *gobKey { if k == nil { return nil } return &gobKey{ Kind: k.kind, StringID: k.name, IntID: k.id, Parent: keyToGobKey(k.parent), Namespace: k.namespace, } } func gobKeyToKey(gk *gobKey) *Key { if gk == nil { return nil } return &Key{ kind: gk.Kind, name: gk.StringID, id: gk.IntID, parent: gobKeyToKey(gk.Parent), namespace: gk.Namespace, } } func (k *Key) GobEncode() ([]byte, error) { buf := new(bytes.Buffer) if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { return nil, err } return buf.Bytes(), nil } func (k *Key) GobDecode(buf []byte) error { gk := new(gobKey) if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { return err } *k = *gobKeyToKey(gk) return nil } func (k *Key) MarshalJSON() ([]byte, error) { return []byte(`"` + k.Encode() + `"`), nil } func (k *Key) UnmarshalJSON(buf []byte) error { if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { return errors.New("datastore: bad JSON key") } k2, err := DecodeKey(string(buf[1 : len(buf)-1])) if err != nil { return err } *k = *k2 return nil } // Encode returns an opaque representation of the key // suitable for use in HTML and URLs. // This is compatible with the Python and Java runtimes. func (k *Key) Encode() string { pKey := keyToProto(k) b, err := proto.Marshal(pKey) if err != nil { panic(err) } // Trailing padding is stripped. return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } // DecodeKey decodes a key from the opaque representation returned by Encode. 
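// Example (illustrative sketch; key is a hypothetical, previously built *Key):
// round-tripping a key through its web-safe string form with Encode and
// DecodeKey:
//
//	s := key.Encode()
//	k2, err := datastore.DecodeKey(s)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// k2 now identifies the same entity as key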
func DecodeKey(encoded string) (*Key, error) { // Re-add padding. if m := len(encoded) % 4; m != 0 { encoded += strings.Repeat("=", 4-m) } b, err := base64.URLEncoding.DecodeString(encoded) if err != nil { return nil, err } pKey := new(pb.Key) if err := proto.Unmarshal(b, pKey); err != nil { return nil, err } return protoToKey(pKey), nil } // NewIncompleteKey creates a new incomplete key. // kind cannot be empty. func NewIncompleteKey(ctx context.Context, kind string, parent *Key) *Key { return NewKey(ctx, kind, "", 0, parent) } // NewKey creates a new key. // kind cannot be empty. // Either one or both of stringID and intID must be zero. If both are zero, // the key returned is incomplete. // parent must either be a complete key or nil. func NewKey(ctx context.Context, kind, name string, id int64, parent *Key) *Key { return &Key{ kind: kind, name: name, id: id, parent: parent, namespace: ctxNamespace(ctx), } } // AllocateIDs accepts a slice of incomplete keys and returns a // slice of complete keys that are guaranteed to be valid in the datastore func AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) { if keys == nil { return nil, nil } req := &pb.AllocateIdsRequest{Key: multiKeyToProto(keys)} res := &pb.AllocateIdsResponse{} if err := call(ctx, "allocateIds", req, res); err != nil { return nil, err } return multiProtoToKey(res.Key), nil } ================================================ FILE: vendor/google.golang.org/cloud/datastore/load.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "fmt" "reflect" "time" pb "google.golang.org/cloud/internal/datastore" ) var ( typeOfByteSlice = reflect.TypeOf([]byte(nil)) typeOfTime = reflect.TypeOf(time.Time{}) ) // typeMismatchReason returns a string explaining why the property p could not // be stored in an entity field of type v.Type(). func typeMismatchReason(p Property, v reflect.Value) string { entityType := "empty" switch p.Value.(type) { case int64: entityType = "int" case bool: entityType = "bool" case string: entityType = "string" case float64: entityType = "float" case *Key: entityType = "*datastore.Key" case time.Time: entityType = "time.Time" case []byte: entityType = "[]byte" } return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) } type propertyLoader struct { // m holds the number of times a substruct field like "Foo.Bar.Baz" has // been seen so far. The map is constructed lazily. m map[string]int } func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string { var sliceOk bool var v reflect.Value // Traverse a struct's struct-typed fields. 
for name := p.Name; ; { decoder, ok := codec.byName[name] if !ok { return "no such struct field" } v = structValue.Field(decoder.index) if !v.IsValid() { return "no such struct field" } if !v.CanSet() { return "cannot set struct field" } if decoder.substructCodec == nil { break } if v.Kind() == reflect.Slice { if l.m == nil { l.m = make(map[string]int) } index := l.m[p.Name] l.m[p.Name] = index + 1 for v.Len() <= index { v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) } structValue = v.Index(index) sliceOk = true } else { structValue = v } // Strip the "I." from "I.X". name = name[len(codec.byIndex[decoder.index].name):] codec = decoder.substructCodec } var slice reflect.Value if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { slice = v v = reflect.New(v.Type().Elem()).Elem() } else if _, ok := prev[p.Name]; ok && !sliceOk { // Zero the field back out that was set previously, turns out its a slice and we don't know what to do with it v.Set(reflect.Zero(v.Type())) return "multiple-valued property requires a slice field type" } prev[p.Name] = struct{}{} pValue := p.Value switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: x, ok := pValue.(int64) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.OverflowInt(x) { return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) } v.SetInt(x) case reflect.Bool: x, ok := pValue.(bool) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.SetBool(x) case reflect.String: x, ok := pValue.(string) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.SetString(x) case reflect.Float32, reflect.Float64: x, ok := pValue.(float64) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.OverflowFloat(x) { return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) } v.SetFloat(x) case reflect.Ptr: x, ok := pValue.(*Key) if !ok && pValue != nil { return typeMismatchReason(p, v) } if _, ok := v.Interface().(*Key); !ok { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) case reflect.Struct: switch v.Type() { case typeOfTime: x, ok := pValue.(time.Time) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) default: return typeMismatchReason(p, v) } case reflect.Slice: x, ok := pValue.([]byte) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.Type().Elem().Kind() != reflect.Uint8 { return typeMismatchReason(p, v) } v.SetBytes(x) default: return typeMismatchReason(p, v) } if slice.IsValid() { slice.Set(reflect.Append(slice, v)) } return "" } // loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer. func loadEntity(dst interface{}, src *pb.Entity) (err error) { props := protoToProperties(src) if e, ok := dst.(PropertyLoadSaver); ok { return e.Load(props) } return LoadStruct(dst, props) } func (s structPLS) Load(props []Property) error { var fieldName, reason string var l propertyLoader prev := make(map[string]struct{}) for _, p := range props { if errStr := l.load(s.codec, s.v, p, prev); errStr != "" { // We don't return early, as we try to load as many properties as possible. // It is valid to load an entity into a struct that cannot fully represent it. // That case returns an error, but the caller is free to ignore it. 
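// Example (illustrative sketch; Task and key are hypothetical caller-side
// names): tolerating ErrFieldMismatch when the stored entity has properties
// the destination struct no longer declares, as the comment above allows:
//
//	var t Task
//	err := datastore.Get(ctx, key, &t)
//	if _, mismatch := err.(*datastore.ErrFieldMismatch); err != nil && !mismatch {
//		log.Fatal(err) // a real failure; a mismatch only means some fields were skipped
//	}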
fieldName, reason = p.Name, errStr } } if reason != "" { return &ErrFieldMismatch{ StructType: s.v.Type(), FieldName: fieldName, Reason: reason, } } return nil } func protoToProperties(src *pb.Entity) []Property { props := src.Property out := make([]Property, 0, len(props)) for { var ( x *pb.Property noIndex bool ) if len(props) > 0 { x, props = props[0], props[1:] noIndex = !x.GetValue().GetIndexed() } else { break } if x.Value.ListValue == nil { out = append(out, Property{ Name: x.GetName(), Value: propValue(x.Value), NoIndex: noIndex, Multiple: false, }) } else { for _, v := range x.Value.ListValue { out = append(out, Property{ Name: x.GetName(), Value: propValue(v), NoIndex: noIndex, Multiple: true, }) } } } return out } // propValue returns a Go value that combines the raw PropertyValue with a // meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. func propValue(v *pb.Value) interface{} { //TODO(PSG-Luna): Support EntityValue //TODO(PSG-Luna): GeoPoint seems gone from the v1 proto, reimplement it once it's readded switch { case v.IntegerValue != nil: return *v.IntegerValue case v.TimestampMicrosecondsValue != nil: return fromUnixMicro(*v.TimestampMicrosecondsValue) case v.BooleanValue != nil: return *v.BooleanValue case v.StringValue != nil: return *v.StringValue case v.BlobValue != nil: return []byte(v.BlobValue) case v.BlobKeyValue != nil: return *v.BlobKeyValue case v.DoubleValue != nil: return *v.DoubleValue case v.KeyValue != nil: return protoToKey(v.KeyValue) } return nil } ================================================ FILE: vendor/google.golang.org/cloud/datastore/prop.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "fmt" "reflect" "strings" "sync" "unicode" ) // Entities with more than this many indexed properties will not be saved. const maxIndexedProperties = 5000 // []byte fields more than 1 megabyte long will not be loaded or saved. const maxBlobLen = 1 << 20 // Property is a name/value pair plus some metadata. A datastore entity's // contents are loaded and saved as a sequence of Properties. An entity can // have multiple Properties with the same name, provided that p.Multiple is // true on all of that entity's Properties with that name. type Property struct { // Name is the property name. Name string // Value is the property value. The valid types are: // - int64 // - bool // - string // - float64 // - *Key // - time.Time // - []byte (up to 1 megabyte in length) // This set is smaller than the set of valid struct field types that the // datastore can load and save. A Property Value cannot be a slice (apart // from []byte); use multiple Properties instead. Also, a Value's type // must be explicitly on the list above; it is not sufficient for the // underlying type to be on that list. For example, a Value of "type // myInt64 int64" is invalid. Smaller-width integers and floats are also // invalid. 
Again, this is more restrictive than the set of valid struct // field types. // // A Value will have an opaque type when loading entities from an index, // such as via a projection query. Load entities into a struct instead // of a PropertyLoadSaver when using a projection query. // // A Value may also be the nil interface value; this is equivalent to // Python's None but not directly representable by a Go struct. Loading // a nil-valued property into a struct will set that field to the zero // value. Value interface{} // NoIndex is whether the datastore cannot index this property. // If NoIndex is set to false, []byte values are limited to 1500 bytes and // string values are limited to 1500 bytes. NoIndex bool // Multiple is whether the entity can have multiple properties with // the same name. Even if a particular instance only has one property with // a certain name, Multiple should be true if a struct would best represent // it as a field of type []T instead of type T. Multiple bool } // PropertyLoadSaver can be converted from and to a slice of Properties. type PropertyLoadSaver interface { Load([]Property) error Save() ([]Property, error) } // PropertyList converts a []Property to implement PropertyLoadSaver. type PropertyList []Property var ( typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) ) // Load loads all of the provided properties into l. // It does not first reset *l to an empty slice. func (l *PropertyList) Load(p []Property) error { *l = append(*l, p...) return nil } // Save saves all of l's properties as a slice of Properties. func (l *PropertyList) Save() ([]Property, error) { return *l, nil } // validPropertyName returns whether name consists of one or more valid Go // identifiers joined by ".". func validPropertyName(name string) bool { if name == "" { return false } for _, s := range strings.Split(name, ".") { if s == "" { return false } first := true for _, c := range s { if first { first = false if c != '_' && !unicode.IsLetter(c) { return false } } else { if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { return false } } } } return true } // structTag is the parsed `datastore:"name,options"` tag of a struct field. // If a field has no tag, or the tag has an empty name, then the structTag's // name is just the field name. A "-" name means that the datastore ignores // that field. type structTag struct { name string noIndex bool } // structCodec describes how to convert a struct to and from a sequence of // properties. type structCodec struct { // byIndex gives the structTag for the i'th field. byIndex []structTag // byName gives the field codec for the structTag with the given name. byName map[string]fieldCodec // hasSlice is whether a struct or any of its nested or embedded structs // has a slice-typed field (other than []byte). hasSlice bool // complete is whether the structCodec is complete. An incomplete // structCodec may be encountered when walking a recursive struct. complete bool } // fieldCodec is a struct field's index and, if that struct field's type is // itself a struct, that substruct's structCodec. type fieldCodec struct { index int substructCodec *structCodec } // structCodecs collects the structCodecs that have already been calculated. var ( structCodecsMutex sync.Mutex structCodecs = make(map[reflect.Type]*structCodec) ) // getStructCodec returns the structCodec for the given struct type. 
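// Example (illustrative sketch; Task is a hypothetical caller-side type):
// the `datastore` struct tag parsed below renames a field, disables indexing,
// or skips the field entirely:
//
//	type Task struct {
//		Title   string    `datastore:"title"`    // stored as property "title"
//		Notes   string    `datastore:",noindex"` // keeps the field name, not indexed
//		Secret  string    `datastore:"-"`        // never stored
//		Created time.Time                        // stored under the Go field name
//	}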
func getStructCodec(t reflect.Type) (*structCodec, error) { structCodecsMutex.Lock() defer structCodecsMutex.Unlock() return getStructCodecLocked(t) } // getStructCodecLocked implements getStructCodec. The structCodecsMutex must // be held when calling this function. func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { c, ok := structCodecs[t] if ok { return c, nil } c = &structCodec{ byIndex: make([]structTag, t.NumField()), byName: make(map[string]fieldCodec), } // Add c to the structCodecs map before we are sure it is good. If t is // a recursive type, it needs to find the incomplete entry for itself in // the map. structCodecs[t] = c defer func() { if retErr != nil { delete(structCodecs, t) } }() for i := range c.byIndex { f := t.Field(i) name, opts := f.Tag.Get("datastore"), "" if i := strings.Index(name, ","); i != -1 { name, opts = name[:i], name[i+1:] } if name == "" { if !f.Anonymous { name = f.Name } } else if name == "-" { c.byIndex[i] = structTag{name: name} continue } else if !validPropertyName(name) { return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) } substructType, fIsSlice := reflect.Type(nil), false switch f.Type.Kind() { case reflect.Struct: substructType = f.Type case reflect.Slice: if f.Type.Elem().Kind() == reflect.Struct { substructType = f.Type.Elem() } fIsSlice = f.Type != typeOfByteSlice c.hasSlice = c.hasSlice || fIsSlice } if substructType != nil && substructType != typeOfTime { if name != "" { name = name + "." } sub, err := getStructCodecLocked(substructType) if err != nil { return nil, err } if !sub.complete { return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) } if fIsSlice && sub.hasSlice { return nil, fmt.Errorf( "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) } c.hasSlice = c.hasSlice || sub.hasSlice for relName := range sub.byName { absName := name + relName if _, ok := c.byName[absName]; ok { return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName) } c.byName[absName] = fieldCodec{index: i, substructCodec: sub} } } else { if _, ok := c.byName[name]; ok { return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) } c.byName[name] = fieldCodec{index: i} } c.byIndex[i] = structTag{ name: name, noIndex: opts == "noindex", } } c.complete = true return c, nil } // structPLS adapts a struct to be a PropertyLoadSaver. type structPLS struct { v reflect.Value codec *structCodec } // newStructPLS returns a PropertyLoadSaver for the struct pointer p. func newStructPLS(p interface{}) (PropertyLoadSaver, error) { v := reflect.ValueOf(p) if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct { return nil, ErrInvalidEntityType } v = v.Elem() codec, err := getStructCodec(v.Type()) if err != nil { return nil, err } return structPLS{v, codec}, nil } // LoadStruct loads the properties from p to dst. // dst must be a struct pointer. func LoadStruct(dst interface{}, p []Property) error { x, err := newStructPLS(dst) if err != nil { return err } return x.Load(p) } // SaveStruct returns the properties from src as a slice of Properties. // src must be a struct pointer. func SaveStruct(src interface{}) ([]Property, error) { x, err := newStructPLS(src) if err != nil { return nil, err } return x.Save() } ================================================ FILE: vendor/google.golang.org/cloud/datastore/query.go ================================================ // Copyright 2014 Google Inc. 
All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "encoding/base64" "errors" "fmt" "math" "reflect" "strconv" "strings" "github.com/golang/protobuf/proto" "golang.org/x/net/context" pb "google.golang.org/cloud/internal/datastore" ) type operator int const ( lessThan operator = iota lessEq equal greaterEq greaterThan keyFieldName = "__key__" ) var operatorToProto = map[operator]*pb.PropertyFilter_Operator{ lessThan: pb.PropertyFilter_LESS_THAN.Enum(), lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL.Enum(), equal: pb.PropertyFilter_EQUAL.Enum(), greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL.Enum(), greaterThan: pb.PropertyFilter_GREATER_THAN.Enum(), } // filter is a conditional filter on query results. type filter struct { FieldName string Op operator Value interface{} } type sortDirection int const ( ascending sortDirection = iota descending ) var sortDirectionToProto = map[sortDirection]*pb.PropertyOrder_Direction{ ascending: pb.PropertyOrder_ASCENDING.Enum(), descending: pb.PropertyOrder_DESCENDING.Enum(), } // order is a sort order on query results. type order struct { FieldName string Direction sortDirection } // NewQuery creates a new Query for a specific entity kind. // // An empty kind means to return all entities, including entities created and // managed by other App Engine features, and is called a kindless query. // Kindless queries cannot include filters or sort orders on property values. func NewQuery(kind string) *Query { return &Query{ kind: kind, limit: -1, } } // Query represents a datastore query. type Query struct { kind string ancestor *Key filter []filter order []order projection []string distinct bool keysOnly bool eventual bool limit int32 offset int32 start []byte end []byte trans *Transaction err error } func (q *Query) clone() *Query { x := *q // Copy the contents of the slice-typed fields to a new backing store. if len(q.filter) > 0 { x.filter = make([]filter, len(q.filter)) copy(x.filter, q.filter) } if len(q.order) > 0 { x.order = make([]order, len(q.order)) copy(x.order, q.order) } return &x } // Ancestor returns a derivative query with an ancestor filter. // The ancestor should not be nil. func (q *Query) Ancestor(ancestor *Key) *Query { q = q.clone() if ancestor == nil { q.err = errors.New("datastore: nil query ancestor") return q } q.ancestor = ancestor return q } // EventualConsistency returns a derivative query that returns eventually // consistent results. // It only has an effect on ancestor queries. func (q *Query) EventualConsistency() *Query { q = q.clone() q.eventual = true return q } // Transaction returns a derivative query that is associated with the given // transaction. // // All reads performed as part of the transaction will come from a single // consistent snapshot. Furthermore, if the transaction is set to a // serializable isolation level, another transaction cannot concurrently modify // the data that is read or modified by this transaction. 
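// Example (illustrative sketch; accountKey and tasks are hypothetical
// caller-side names): associating a query with a transaction so its reads come
// from the transaction's snapshot:
//
//	tx, err := datastore.NewTransaction(ctx, datastore.Serializable)
//	if err != nil {
//		log.Fatal(err)
//	}
//	q := datastore.NewQuery("Task").Ancestor(accountKey).Transaction(tx)
//	keys, err := q.GetAll(ctx, &tasks)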
func (q *Query) Transaction(t *Transaction) *Query { q = q.clone() q.trans = t return q } // Filter returns a derivative query with a field-based filter. // The filterStr argument must be a field name followed by optional space, // followed by an operator, one of ">", "<", ">=", "<=", or "=". // Fields are compared against the provided value using the operator. // Multiple filters are AND'ed together. // Field names which contain spaces, quote marks, or operator characters // should be passed as quoted Go string literals as returned by strconv.Quote // or the fmt package's %q verb. func (q *Query) Filter(filterStr string, value interface{}) *Query { q = q.clone() filterStr = strings.TrimSpace(filterStr) if filterStr == "" { q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) return q } f := filter{ FieldName: strings.TrimRight(filterStr, " ><=!"), Value: value, } switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { case "<=": f.Op = lessEq case ">=": f.Op = greaterEq case "<": f.Op = lessThan case ">": f.Op = greaterThan case "=": f.Op = equal default: q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) return q } var err error f.FieldName, err = unquote(f.FieldName) if err != nil { q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) return q } q.filter = append(q.filter, f) return q } // Order returns a derivative query with a field-based sort order. Orders are // applied in the order they are added. The default order is ascending; to sort // in descending order prefix the fieldName with a minus sign (-). // Field names which contain spaces, quote marks, or the minus sign // should be passed as quoted Go string literals as returned by strconv.Quote // or the fmt package's %q verb. func (q *Query) Order(fieldName string) *Query { q = q.clone() fieldName, dir := strings.TrimSpace(fieldName), ascending if strings.HasPrefix(fieldName, "-") { fieldName, dir = strings.TrimSpace(fieldName[1:]), descending } else if strings.HasPrefix(fieldName, "+") { q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) return q } fieldName, err := unquote(fieldName) if err != nil { q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) return q } if fieldName == "" { q.err = errors.New("datastore: empty order") return q } q.order = append(q.order, order{ Direction: dir, FieldName: fieldName, }) return q } // unquote optionally interprets s as a double-quoted or backquoted Go // string literal if it begins with the relevant character. func unquote(s string) (string, error) { if s == "" || (s[0] != '`' && s[0] != '"') { return s, nil } return strconv.Unquote(s) } // Project returns a derivative query that yields only the given fields. It // cannot be used with KeysOnly. func (q *Query) Project(fieldNames ...string) *Query { q = q.clone() q.projection = append([]string(nil), fieldNames...) return q } // Distinct returns a derivative query that yields de-duplicated entities with // respect to the set of projected fields. It is only used for projection // queries. func (q *Query) Distinct() *Query { q = q.clone() q.distinct = true return q } // KeysOnly returns a derivative query that yields only keys, not keys and // entities. It cannot be used with projection queries. func (q *Query) KeysOnly() *Query { q = q.clone() q.keysOnly = true return q } // Limit returns a derivative query that has a limit on the number of results // returned. A negative value means unlimited. 
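// Example (illustrative sketch; the property names and the tasks slice are
// hypothetical): chaining the derivative query methods defined in this file;
// each call returns a copy and leaves its receiver untouched:
//
//	q := datastore.NewQuery("Task").
//		Filter("Done =", false).
//		Order("-Created").
//		Limit(10)
//	keys, err := q.GetAll(ctx, &tasks)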
func (q *Query) Limit(limit int) *Query { q = q.clone() if limit < math.MinInt32 || limit > math.MaxInt32 { q.err = errors.New("datastore: query limit overflow") return q } q.limit = int32(limit) return q } // Offset returns a derivative query that has an offset of how many keys to // skip over before returning results. A negative value is invalid. func (q *Query) Offset(offset int) *Query { q = q.clone() if offset < 0 { q.err = errors.New("datastore: negative query offset") return q } if offset > math.MaxInt32 { q.err = errors.New("datastore: query offset overflow") return q } q.offset = int32(offset) return q } // Start returns a derivative query with the given start point. func (q *Query) Start(c Cursor) *Query { q = q.clone() if c.cc == nil { q.err = errors.New("datastore: invalid cursor") return q } q.start = c.cc return q } // End returns a derivative query with the given end point. func (q *Query) End(c Cursor) *Query { q = q.clone() if c.cc == nil { q.err = errors.New("datastore: invalid cursor") return q } q.end = c.cc return q } // toProto converts the query to a protocol buffer. func (q *Query) toProto(req *pb.RunQueryRequest) error { dst := pb.Query{} if len(q.projection) != 0 && q.keysOnly { return errors.New("datastore: query cannot both project and be keys-only") } dst.Reset() if q.kind != "" { dst.Kind = []*pb.KindExpression{&pb.KindExpression{Name: proto.String(q.kind)}} } if q.projection != nil { for _, propertyName := range q.projection { dst.Projection = append(dst.Projection, &pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(propertyName)}}) } if q.distinct { for _, propertyName := range q.projection { dst.GroupBy = append(dst.GroupBy, &pb.PropertyReference{Name: proto.String(propertyName)}) } } } if q.keysOnly { dst.Projection = []*pb.PropertyExpression{&pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(keyFieldName)}}} } var filters []*pb.Filter for _, qf := range q.filter { if qf.FieldName == "" { return errors.New("datastore: empty query filter field name") } v, errStr := interfaceToProto(reflect.ValueOf(qf.Value).Interface()) if errStr != "" { return errors.New("datastore: bad query filter value type: " + errStr) } xf := &pb.PropertyFilter{ Operator: operatorToProto[qf.Op], Property: &pb.PropertyReference{Name: proto.String(qf.FieldName)}, Value: v, } if xf.Operator == nil { return errors.New("datastore: unknown query filter operator") } filters = append(filters, &pb.Filter{PropertyFilter: xf}) } if q.ancestor != nil { filters = append(filters, &pb.Filter{ PropertyFilter: &pb.PropertyFilter{ Property: &pb.PropertyReference{Name: proto.String("__key__")}, Operator: pb.PropertyFilter_HAS_ANCESTOR.Enum(), Value: &pb.Value{KeyValue: keyToProto(q.ancestor)}, }}) } if len(filters) == 1 { dst.Filter = filters[0] } else if len(filters) > 1 { dst.Filter = &pb.Filter{CompositeFilter: &pb.CompositeFilter{ Operator: pb.CompositeFilter_AND.Enum(), Filter: filters, }} } for _, qo := range q.order { if qo.FieldName == "" { return errors.New("datastore: empty query order field name") } xo := &pb.PropertyOrder{ Property: &pb.PropertyReference{Name: proto.String(qo.FieldName)}, Direction: sortDirectionToProto[qo.Direction], } if xo.Direction == nil { return errors.New("datastore: unknown query order direction") } dst.Order = append(dst.Order, xo) } if q.limit >= 0 { dst.Limit = proto.Int32(q.limit) } if q.offset != 0 { dst.Offset = proto.Int32(q.offset) } dst.StartCursor = q.start dst.EndCursor = q.end if t := q.trans; t != nil { if 
t.id == nil { return errExpiredTransaction } req.ReadOptions = &pb.ReadOptions{Transaction: t.id} } req.Query = &dst return nil } // Count returns the number of results for the query. func (q *Query) Count(ctx context.Context) (int, error) { // Check that the query is well-formed. if q.err != nil { return 0, q.err } // Run a copy of the query, with keysOnly true (if we're not a projection, // since the two are incompatible). newQ := q.clone() newQ.keysOnly = len(newQ.projection) == 0 req := &pb.RunQueryRequest{} if ns := ctxNamespace(ctx); ns != "" { req.PartitionId = &pb.PartitionId{ Namespace: proto.String(ns), } } if err := newQ.toProto(req); err != nil { return 0, err } res := &pb.RunQueryResponse{} if err := call(ctx, "runQuery", req, res); err != nil { return 0, err } var n int b := res.Batch for { n += len(b.GetEntityResult()) if b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED { break } var err error // TODO(jbd): Support count queries that have a limit and an offset. if err = callNext(ctx, req, res, 0, 0); err != nil { return 0, err } } return int(n), nil } func callNext(ctx context.Context, req *pb.RunQueryRequest, res *pb.RunQueryResponse, offset, limit int32) error { if res.GetBatch().EndCursor == nil { return errors.New("datastore: internal error: server did not return a cursor") } req.Query.StartCursor = res.GetBatch().GetEndCursor() if limit >= 0 { req.Query.Limit = proto.Int32(limit) } if offset != 0 { req.Query.Offset = proto.Int32(offset) } res.Reset() return call(ctx, "runQuery", req, res) } // GetAll runs the query in the given context and returns all keys that match // that query, as well as appending the values to dst. // // dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- // interface, non-pointer type P such that P or *P implements PropertyLoadSaver. // // As a special case, *PropertyList is an invalid type for dst, even though a // PropertyList is a slice of structs. It is treated as invalid to avoid being // mistakenly passed when *[]PropertyList was intended. // // The keys returned by GetAll will be in a 1-1 correspondence with the entities // added to dst. // // If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. func (q *Query) GetAll(ctx context.Context, dst interface{}) ([]*Key, error) { var ( dv reflect.Value mat multiArgType elemType reflect.Type errFieldMismatch error ) if !q.keysOnly { dv = reflect.ValueOf(dst) if dv.Kind() != reflect.Ptr || dv.IsNil() { return nil, ErrInvalidEntityType } dv = dv.Elem() mat, elemType = checkMultiArg(dv) if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { return nil, ErrInvalidEntityType } } var keys []*Key for t := q.Run(ctx); ; { k, e, err := t.next() if err == Done { break } if err != nil { return keys, err } if !q.keysOnly { ev := reflect.New(elemType) if elemType.Kind() == reflect.Map { // This is a special case. The zero values of a map type are // not immediately useful; they have to be make'd. // // Funcs and channels are similar, in that a zero value is not useful, // but even a freshly make'd channel isn't useful: there's no fixed // channel buffer size that is always going to be large enough, and // there's no goroutine to drain the other end. Theoretically, these // types could be supported, for example by sniffing for a constructor // method or requiring prior registration, but for now it's not a // frequent enough concern to be worth it. 
Programmers can work around // it by explicitly using Iterator.Next instead of the Query.GetAll // convenience method. x := reflect.MakeMap(elemType) ev.Elem().Set(x) } if err = loadEntity(ev.Interface(), e); err != nil { if _, ok := err.(*ErrFieldMismatch); ok { // We continue loading entities even in the face of field mismatch errors. // If we encounter any other error, that other error is returned. Otherwise, // an ErrFieldMismatch is returned. errFieldMismatch = err } else { return keys, err } } if mat != multiArgTypeStructPtr { ev = ev.Elem() } dv.Set(reflect.Append(dv, ev)) } keys = append(keys, k) } return keys, errFieldMismatch } // Run runs the query in the given context. func (q *Query) Run(ctx context.Context) *Iterator { if q.err != nil { return &Iterator{err: q.err} } t := &Iterator{ ctx: ctx, limit: q.limit, q: q, prevCC: q.start, } t.req.Reset() if ns := ctxNamespace(ctx); ns != "" { t.req.PartitionId = &pb.PartitionId{ Namespace: proto.String(ns), } } if err := q.toProto(&t.req); err != nil { t.err = err return t } if err := call(ctx, "runQuery", &t.req, &t.res); err != nil { t.err = err return t } b := t.res.GetBatch() offset := q.offset - b.GetSkippedResults() for offset > 0 && b.GetMoreResults() == pb.QueryResultBatch_NOT_FINISHED { t.prevCC = b.GetEndCursor() var err error if err = callNext(t.ctx, &t.req, &t.res, offset, t.limit); err != nil { t.err = err break } skip := b.GetSkippedResults() if skip < 0 { t.err = errors.New("datastore: internal error: negative number of skipped_results") break } offset -= skip } if offset < 0 { t.err = errors.New("datastore: internal error: query offset was overshot") } return t } // Iterator is the result of running a query. type Iterator struct { ctx context.Context err error // req is the request we sent previously, we need to keep track of it to resend it req pb.RunQueryRequest // res is the result of the most recent RunQuery or Next API call. res pb.RunQueryResponse // i is how many elements of res.Result we have iterated over. i int // limit is the limit on the number of results this iterator should return. // A negative value means unlimited. limit int32 // q is the original query which yielded this iterator. q *Query // prevCC is the compiled cursor that marks the end of the previous batch // of results. prevCC []byte } // Done is returned when a query iteration has completed. var Done = errors.New("datastore: query has no more results") // Next returns the key of the next result. When there are no more results, // Done is returned as the error. // // If the query is not keys only and dst is non-nil, it also loads the entity // stored for that key into the struct pointer or PropertyLoadSaver dst, with // the same semantics and possible errors as for the Get function. func (t *Iterator) Next(dst interface{}) (*Key, error) { k, e, err := t.next() if err != nil { return nil, err } if dst != nil && !t.q.keysOnly { err = loadEntity(dst, e) } return k, err } func (t *Iterator) next() (*Key, *pb.Entity, error) { if t.err != nil { return nil, nil, t.err } // Issue datastore_v3/Next RPCs as necessary. 
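// Example (illustrative sketch; Task is a hypothetical caller-side type):
// the usual loop over Iterator.Next, stopping when Done is returned as
// documented above:
//
//	for it := q.Run(ctx); ; {
//		var t Task
//		k, err := it.Next(&t)
//		if err == datastore.Done {
//			break
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(k, t.Title)
//	}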
b := t.res.GetBatch() for t.i == len(b.EntityResult) { if b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED { t.err = Done return nil, nil, t.err } t.prevCC = b.GetEndCursor() if err := callNext(t.ctx, &t.req, &t.res, 0, t.limit); err != nil { t.err = err return nil, nil, t.err } if b.GetSkippedResults() != 0 { t.err = errors.New("datastore: internal error: iterator has skipped results") return nil, nil, t.err } t.i = 0 if t.limit >= 0 { t.limit -= int32(len(b.EntityResult)) if t.limit < 0 { t.err = errors.New("datastore: internal error: query returned more results than the limit") return nil, nil, t.err } } } // Extract the key from the t.i'th element of t.res.Result. e := b.EntityResult[t.i] t.i++ if e.Entity.Key == nil { return nil, nil, errors.New("datastore: internal error: server did not return a key") } k := protoToKey(e.Entity.Key) if k.Incomplete() { return nil, nil, errors.New("datastore: internal error: server returned an invalid key") } return k, e.Entity, nil } // Cursor returns a cursor for the iterator's current location. func (t *Iterator) Cursor() (Cursor, error) { if t.err != nil && t.err != Done { return Cursor{}, t.err } // If we are at either end of the current batch of results, // return the compiled cursor at that end. b := t.res.Batch skipped := b.GetSkippedResults() if t.i == 0 && skipped == 0 { if t.prevCC == nil { // A nil pointer (of type *pb.CompiledCursor) means no constraint: // passing it as the end cursor of a new query means unlimited results // (glossing over the integer limit parameter for now). // A non-nil pointer to an empty pb.CompiledCursor means the start: // passing it as the end cursor of a new query means 0 results. // If prevCC was nil, then the original query had no start cursor, but // Iterator.Cursor should return "the start" instead of unlimited. return Cursor{}, nil } return Cursor{t.prevCC}, nil } if t.i == len(b.EntityResult) { return Cursor{b.EndCursor}, nil } // Otherwise, re-run the query offset to this iterator's position, starting from // the most recent compiled cursor. This is done on a best-effort basis, as it // is racy; if a concurrent process has added or removed entities, then the // cursor returned may be inconsistent. q := t.q.clone() q.start = t.prevCC q.offset = skipped + int32(t.i) q.limit = 0 q.keysOnly = len(q.projection) == 0 t1 := q.Run(t.ctx) _, _, err := t1.next() if err != Done { if err == nil { err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results") } return Cursor{}, err } return Cursor{t1.res.Batch.EndCursor}, nil } // Cursor is an iterator's position. It can be converted to and from an opaque // string. A cursor can be used from different HTTP requests, but only with a // query with the same kind, ancestor, filter and order constraints. type Cursor struct { cc []byte } // String returns a base-64 string representation of a cursor. func (c Cursor) String() string { if c.cc == nil { return "" } return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=") } // Decode decodes a cursor from its base-64 string representation. func DecodeCursor(s string) (Cursor, error) { if s == "" { return Cursor{}, nil } if n := len(s) % 4; n != 0 { s += strings.Repeat("=", 4-n) } b, err := base64.URLEncoding.DecodeString(s) if err != nil { return Cursor{}, err } return Cursor{b}, nil } ================================================ FILE: vendor/google.golang.org/cloud/datastore/save.go ================================================ // Copyright 2014 Google Inc. 
All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "errors" "fmt" "reflect" "time" "github.com/golang/protobuf/proto" pb "google.golang.org/cloud/internal/datastore" ) // saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. func saveEntity(key *Key, src interface{}) (*pb.Entity, error) { var err error var props []Property if e, ok := src.(PropertyLoadSaver); ok { props, err = e.Save() } else { props, err = SaveStruct(src) } if err != nil { return nil, err } return propertiesToProto(key, props) } func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error { p := Property{ Name: name, NoIndex: noIndex, Multiple: multiple, } switch x := v.Interface().(type) { case *Key, time.Time: p.Value = x default: switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: p.Value = v.Int() case reflect.Bool: p.Value = v.Bool() case reflect.String: p.Value = v.String() case reflect.Float32, reflect.Float64: p.Value = v.Float() case reflect.Slice: if v.Type().Elem().Kind() == reflect.Uint8 { p.Value = v.Bytes() } case reflect.Struct: if !v.CanAddr() { return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") } sub, err := newStructPLS(v.Addr().Interface()) if err != nil { return fmt.Errorf("datastore: unsupported struct field: %v", err) } return sub.(structPLS).save(props, name, noIndex, multiple) } } if p.Value == nil { return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) } *props = append(*props, p) return nil } func (s structPLS) Save() ([]Property, error) { var props []Property if err := s.save(&props, "", false, false); err != nil { return nil, err } return props, nil } func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error { for i, t := range s.codec.byIndex { if t.name == "-" { continue } name := t.name if prefix != "" { name = prefix + name } v := s.v.Field(i) if !v.IsValid() || !v.CanSet() { continue } noIndex1 := noIndex || t.noIndex // For slice fields that aren't []byte, save each element. if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { for j := 0; j < v.Len(); j++ { if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil { return err } } continue } // Otherwise, save the field itself. 
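// Example (illustrative sketch; Secret is a hypothetical caller-side type):
// a type may bypass the struct codec entirely by implementing
// PropertyLoadSaver (see prop.go above); saveEntity then uses its Save method:
//
//	type Secret struct{ Value []byte }
//
//	func (s *Secret) Save() ([]datastore.Property, error) {
//		return []datastore.Property{{Name: "Value", Value: s.Value, NoIndex: true}}, nil
//	}
//
//	func (s *Secret) Load(ps []datastore.Property) error {
//		for _, p := range ps {
//			if p.Name == "Value" {
//				if b, ok := p.Value.([]byte); ok {
//					s.Value = b
//				}
//			}
//		}
//		return nil
//	}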
if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil { return err } } return nil } func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) { e := &pb.Entity{ Key: keyToProto(key), } indexedProps := 0 prevMultiple := make(map[string]*pb.Property) for _, p := range props { val, err := interfaceToProto(p.Value) if err != "" { return nil, fmt.Errorf("datastore: %s for a Property with Name %q", err, p.Name) } if !p.NoIndex { rVal := reflect.ValueOf(p.Value) if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 { indexedProps += rVal.Len() } else { indexedProps++ } } if indexedProps > maxIndexedProperties { return nil, errors.New("datastore: too many indexed properties") } switch v := p.Value.(type) { case string: case []byte: if len(v) > 1500 && !p.NoIndex { return nil, fmt.Errorf("datastore: cannot index a Property with Name %q", p.Name) } } val.Indexed = proto.Bool(!p.NoIndex) if p.Multiple { x, ok := prevMultiple[p.Name] if !ok { x = &pb.Property{ Name: proto.String(p.Name), Value: &pb.Value{}, } prevMultiple[p.Name] = x e.Property = append(e.Property, x) } x.Value.ListValue = append(x.Value.ListValue, val) } else { e.Property = append(e.Property, &pb.Property{ Name: proto.String(p.Name), Value: val, }) } } return e, nil } func interfaceToProto(iv interface{}) (p *pb.Value, errStr string) { val := new(pb.Value) switch v := iv.(type) { case int: val.IntegerValue = proto.Int64(int64(v)) case int32: val.IntegerValue = proto.Int64(int64(v)) case int64: val.IntegerValue = proto.Int64(v) case bool: val.BooleanValue = proto.Bool(v) case string: val.StringValue = proto.String(v) case float32: val.DoubleValue = proto.Float64(float64(v)) case float64: val.DoubleValue = proto.Float64(v) case *Key: if v != nil { val.KeyValue = keyToProto(v) } case time.Time: if v.Before(minTime) || v.After(maxTime) { return nil, fmt.Sprintf("time value out of range") } val.TimestampMicrosecondsValue = proto.Int64(toUnixMicro(v)) case []byte: val.BlobValue = v default: if iv != nil { return nil, fmt.Sprintf("invalid Value type %t", iv) } } // TODO(jbd): Support ListValue and EntityValue. // TODO(jbd): Support types whose underlying type is one of the types above. return val, "" } ================================================ FILE: vendor/google.golang.org/cloud/datastore/time.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "math" "time" ) var ( minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) ) func toUnixMicro(t time.Time) int64 { // We cannot use t.UnixNano() / 1e3 because we want to handle times more than // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot // be represented in the numerator of a single int64 divide. 
return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) } func fromUnixMicro(t int64) time.Time { return time.Unix(t/1e6, (t%1e6)*1e3) } ================================================ FILE: vendor/google.golang.org/cloud/datastore/transaction.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "errors" "net/http" "github.com/golang/protobuf/proto" "golang.org/x/net/context" pb "google.golang.org/cloud/internal/datastore" ) // ErrConcurrentTransaction is returned when a transaction is rolled back due // to a conflict with a concurrent transaction. var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") var errExpiredTransaction = errors.New("datastore: transaction expired") // A TransactionOption configures the Transaction returned by NewTransaction. type TransactionOption interface { apply(*pb.BeginTransactionRequest) } type isolation struct { level pb.BeginTransactionRequest_IsolationLevel } func (i isolation) apply(req *pb.BeginTransactionRequest) { req.IsolationLevel = i.level.Enum() } var ( // Snapshot causes the transaction to enforce a snapshot isolation level. Snapshot TransactionOption = isolation{pb.BeginTransactionRequest_SNAPSHOT} // Serializable causes the transaction to enforce a serializable isolation level. Serializable TransactionOption = isolation{pb.BeginTransactionRequest_SERIALIZABLE} ) // Transaction represents a set of datastore operations to be committed atomically. // // Operations are enqueued by calling the Put and Delete methods on Transaction // (or their Multi-equivalents). These operations are only committed when the // Commit method is invoked. To ensure consistency, reads must be performed by // using Transaction's Get method or by using the Transaction method when // building a query. // // A Transaction must be committed or rolled back exactly once. type Transaction struct { id []byte ctx context.Context mutation *pb.Mutation // The mutations to apply. pending []*PendingKey // Incomplete keys pending transaction completion. } // NewTransaction starts a new transaction. func NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) { req, resp := &pb.BeginTransactionRequest{}, &pb.BeginTransactionResponse{} for _, o := range opts { o.apply(req) } if err := call(ctx, "beginTransaction", req, resp); err != nil { return nil, err } return &Transaction{ id: resp.Transaction, ctx: ctx, mutation: &pb.Mutation{}, }, nil } // Commit applies the enqueued operations atomically. 
func (t *Transaction) Commit() (*Commit, error) {
	if t.id == nil {
		return nil, errExpiredTransaction
	}
	req := &pb.CommitRequest{
		Transaction: t.id,
		Mutation:    t.mutation,
		Mode:        pb.CommitRequest_TRANSACTIONAL.Enum(),
	}
	t.id = nil
	resp := &pb.CommitResponse{}
	if err := call(t.ctx, "commit", req, resp); err != nil {
		if e, ok := err.(*errHTTP); ok && e.StatusCode == http.StatusConflict {
			// TODO(jbd): Make sure that we explicitly handle the case where response
			// has an HTTP 409 and the error message indicates that it's a concurrent
			// transaction error.
			return nil, ErrConcurrentTransaction
		}
		return nil, err
	}
	// Copy any newly minted keys into the returned keys.
	if len(t.pending) != len(resp.MutationResult.InsertAutoIdKey) {
		return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
	}
	commit := &Commit{}
	for i, p := range t.pending {
		p.key = protoToKey(resp.MutationResult.InsertAutoIdKey[i])
		p.commit = commit
	}
	return commit, nil
}

// Rollback abandons a pending transaction.
func (t *Transaction) Rollback() error {
	if t.id == nil {
		return errExpiredTransaction
	}
	id := t.id
	t.id = nil
	return call(t.ctx, "rollback", &pb.RollbackRequest{Transaction: id}, &pb.RollbackResponse{})
}

// Get is the transaction-specific version of the package function Get.
// All reads performed during the transaction will come from a single consistent
// snapshot. Furthermore, if the transaction is set to a serializable isolation
// level, another transaction cannot concurrently modify the data that is read
// or modified by this transaction.
func (t *Transaction) Get(key *Key, dst interface{}) error {
	err := get(t.ctx, []*Key{key}, []interface{}{dst}, &pb.ReadOptions{Transaction: t.id})
	if me, ok := err.(MultiError); ok {
		return me[0]
	}
	return err
}

// GetMulti is a batch version of Get.
func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error {
	if t.id == nil {
		return errExpiredTransaction
	}
	return get(t.ctx, keys, dst, &pb.ReadOptions{Transaction: t.id})
}

// Put is the transaction-specific version of the package function Put.
//
// Put returns a PendingKey which can be resolved into a Key using the
// return value from a successful Commit. If key is an incomplete key, the
// returned pending key will resolve to a unique key generated by the
// datastore.
func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
	h, err := t.PutMulti([]*Key{key}, []interface{}{src})
	if err != nil {
		if me, ok := err.(MultiError); ok {
			return nil, me[0]
		}
		return nil, err
	}
	return h[0], nil
}

// PutMulti is a batch version of Put. One PendingKey is returned for each
// element of src in the same order.
func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) {
	if t.id == nil {
		return nil, errExpiredTransaction
	}
	mutation, err := putMutation(keys, src)
	if err != nil {
		return nil, err
	}
	proto.Merge(t.mutation, mutation)
	// Prepare the returned handles, pre-populating where possible.
	// The slice starts empty because handles are appended below; allocating it
	// with a non-zero length would prepend nil entries.
	ret := make([]*PendingKey, 0, len(keys))
	for _, key := range keys {
		h := &PendingKey{}
		if key.Incomplete() {
			// This key will be in the final commit result.
			t.pending = append(t.pending, h)
		} else {
			h.key = key
		}
		ret = append(ret, h)
	}
	return ret, nil
}

// Delete is the transaction-specific version of the package function Delete.
// Delete enqueues the deletion of the entity for the given key, to be
// committed atomically upon calling Commit.
func (t *Transaction) Delete(key *Key) error { err := t.DeleteMulti([]*Key{key}) if me, ok := err.(MultiError); ok { return me[0] } return err } // DeleteMulti is a batch version of Delete. func (t *Transaction) DeleteMulti(keys []*Key) error { if t.id == nil { return errExpiredTransaction } mutation, err := deleteMutation(keys) if err != nil { return err } proto.Merge(t.mutation, mutation) return nil } // Commit represents the result of a committed transaction. type Commit struct{} // Key resolves a pending key handle into a final key. func (c *Commit) Key(p *PendingKey) *Key { if c != p.commit { panic("PendingKey was not created by corresponding transaction") } return p.key } // PendingKey represents the key for newly-inserted entity. It can be // resolved into a Key by calling the Key method of Commit. type PendingKey struct { key *Key commit *Commit } ================================================ FILE: vendor/google.golang.org/cloud/examples/bigquery/concat_table/main.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // concat_table is an example client of the bigquery client library. // It concatenates two BigQuery tables and writes the result to another table. package main import ( "flag" "fmt" "log" "os" "time" "golang.org/x/net/context" "golang.org/x/oauth2/google" "google.golang.org/cloud/bigquery" ) var ( project = flag.String("project", "", "The ID of a Google Cloud Platform project") dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") src1 = flag.String("src1", "", "The ID of the first BigQuery table to concatenate") src2 = flag.String("src2", "", "The ID of the second BigQuery table to concatenate") dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to") pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") ) func main() { flag.Parse() flagsOk := true for _, f := range []string{"project", "dataset", "src1", "src2", "dest"} { if flag.Lookup(f).Value.String() == "" { fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) flagsOk = false } } if !flagsOk { os.Exit(1) } if *src1 == *src2 || *src1 == *dest || *src2 == *dest { log.Fatalf("Different values must be supplied for each of --src1, --src2 and --dest") } httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) if err != nil { log.Fatalf("Creating http client: %v", err) } client, err := bigquery.NewClient(httpClient, *project) if err != nil { log.Fatalf("Creating bigquery client: %v", err) } s1 := &bigquery.Table{ ProjectID: *project, DatasetID: *dataset, TableID: *src1, } s2 := &bigquery.Table{ ProjectID: *project, DatasetID: *dataset, TableID: *src2, } d := &bigquery.Table{ ProjectID: *project, DatasetID: *dataset, TableID: *dest, WriteDisposition: bigquery.WriteTruncate, } // Concatenate data. 
job, err := client.Copy(context.Background(), d, bigquery.Tables{s1, s2}) if err != nil { log.Fatalf("Concatenating: %v", err) } fmt.Printf("Job for concatenation operation: %+v\n", job) fmt.Printf("Waiting for job to complete.\n") for range time.Tick(*pollint) { status, err := job.Status(context.Background()) if err != nil { fmt.Printf("Failure determining status: %v", err) break } if !status.Done() { continue } if err := status.Err(); err == nil { fmt.Printf("Success\n") } else { fmt.Printf("Failure: %+v\n", err) } break } } ================================================ FILE: vendor/google.golang.org/cloud/examples/bigquery/load/main.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // load is an example client of the bigquery client library. // It loads a file from Google Cloud Storage into a BigQuery table. package main import ( "flag" "fmt" "log" "os" "time" "golang.org/x/net/context" "golang.org/x/oauth2/google" "google.golang.org/cloud/bigquery" ) var ( project = flag.String("project", "", "The ID of a Google Cloud Platform project") dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") table = flag.String("table", "", "The ID of a BigQuery table to load data into") bucket = flag.String("bucket", "", "The name of a Google Cloud Storage bucket to load data from") object = flag.String("object", "", "The name of a Google Cloud Storage object to load data from. Must exist within the bucket specified by --bucket") skiprows = flag.Int64("skiprows", 0, "The number of rows of the source data to skip when loading") pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") ) func main() { flag.Parse() flagsOk := true for _, f := range []string{"project", "dataset", "table", "bucket", "object"} { if flag.Lookup(f).Value.String() == "" { fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) flagsOk = false } } if !flagsOk { os.Exit(1) } httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) if err != nil { log.Fatalf("Creating http client: %v", err) } client, err := bigquery.NewClient(httpClient, *project) if err != nil { log.Fatalf("Creating bigquery client: %v", err) } table := &bigquery.Table{ ProjectID: *project, DatasetID: *dataset, TableID: *table, WriteDisposition: bigquery.WriteTruncate, } gcs := client.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object)) gcs.SkipLeadingRows = *skiprows // Load data from Google Cloud Storage into a BigQuery table. 
job, err := client.Copy( context.Background(), table, gcs, bigquery.MaxBadRecords(1), bigquery.AllowQuotedNewlines()) if err != nil { log.Fatalf("Loading data: %v", err) } fmt.Printf("Job for data load operation: %+v\n", job) fmt.Printf("Waiting for job to complete.\n") for range time.Tick(*pollint) { status, err := job.Status(context.Background()) if err != nil { fmt.Printf("Failure determining status: %v", err) break } if !status.Done() { continue } if err := status.Err(); err == nil { fmt.Printf("Success\n") } else { fmt.Printf("Failure: %+v\n", err) } break } } ================================================ FILE: vendor/google.golang.org/cloud/examples/bigquery/query/main.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // query is an example client of the bigquery client library. // It submits a query and writes the result to a table. package main import ( "flag" "fmt" "log" "os" "time" "golang.org/x/net/context" "golang.org/x/oauth2/google" "google.golang.org/cloud/bigquery" ) var ( project = flag.String("project", "", "The ID of a Google Cloud Platform project") dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") q = flag.String("q", "", "The query string") dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to. If unset, an ephemeral table ID will be generated.") pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") ) func main() { flag.Parse() flagsOk := true for _, f := range []string{"project", "dataset", "q"} { if flag.Lookup(f).Value.String() == "" { fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) flagsOk = false } } if !flagsOk { os.Exit(1) } httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) if err != nil { log.Fatalf("Creating http client: %v", err) } client, err := bigquery.NewClient(httpClient, *project) if err != nil { log.Fatalf("Creating bigquery client: %v", err) } d := &bigquery.Table{ WriteDisposition: bigquery.WriteTruncate, } if *dest != "" { d.ProjectID = *project d.DatasetID = *dataset d.TableID = *dest } query := &bigquery.Query{ Q: *q, DefaultProjectID: *project, DefaultDatasetID: *dataset, } // Query data. job, err := client.Copy(context.Background(), d, query) if err != nil { log.Fatalf("Querying: %v", err) } fmt.Printf("Job for query operation: %+v\n", job) fmt.Printf("Waiting for job to complete.\n") for range time.Tick(*pollint) { status, err := job.Status(context.Background()) if err != nil { fmt.Printf("Failure determining status: %v", err) break } if !status.Done() { continue } if err := status.Err(); err == nil { fmt.Printf("Success\n") } else { fmt.Printf("Failure: %+v\n", err) } break } } ================================================ FILE: vendor/google.golang.org/cloud/examples/bigquery/read/main.go ================================================ // Copyright 2015 Google Inc. All Rights Reserved. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// read is an example client of the bigquery client library.
// It reads from a table, returning the data via an Iterator.
package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"text/tabwriter"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	"google.golang.org/cloud/bigquery"
)

var (
	project = flag.String("project", "", "The ID of a Google Cloud Platform project")
	dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
	table   = flag.String("table", "", "The ID of a BigQuery table.")
)

func main() {
	flag.Parse()
	flagsOk := true
	for _, f := range []string{"project", "dataset", "table"} {
		if flag.Lookup(f).Value.String() == "" {
			fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
			flagsOk = false
		}
	}
	if !flagsOk {
		os.Exit(1)
	}

	httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope)
	if err != nil {
		log.Fatalf("Creating http client: %v", err)
	}
	client, err := bigquery.NewClient(httpClient, *project)
	if err != nil {
		log.Fatalf("Creating bigquery client: %v", err)
	}

	it, err := client.Read(&bigquery.Table{
		ProjectID: *project,
		DatasetID: *dataset,
		TableID:   *table,
	})
	if err != nil {
		log.Fatalf("Reading: %v", err)
	}

	// one-space padding.
	tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	for it.Next(context.Background()) {
		var vals bigquery.ValueList
		if err := it.Get(&vals); err != nil {
			fmt.Printf("err calling get: %v\n", err)
		} else {
			sep := ""
			for _, v := range vals {
				fmt.Fprintf(tw, "%s%v", sep, v)
				sep = "\t"
			}
			fmt.Fprintf(tw, "\n")
		}
	}
	tw.Flush()

	if err := it.Err(); err != nil {
		fmt.Printf("err reading: %v\n", err)
	}
}

================================================
FILE: vendor/google.golang.org/cloud/examples/pubsub/cmdline/main.go
================================================
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main contains a simple command line tool for Cloud Pub/Sub // Cloud Pub/Sub docs: https://cloud.google.com/pubsub/docs package main import ( "errors" "flag" "fmt" "io/ioutil" "log" "net/http" "os" "strconv" "time" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/cloud" "google.golang.org/cloud/compute/metadata" "google.golang.org/cloud/pubsub" ) var ( jsonFile = flag.String("j", "", "A path to your JSON key file for your service account downloaded from Google Developer Console, not needed if you run it on Compute Engine instances.") projID = flag.String("p", "", "The ID of your Google Cloud project.") reportMPS = flag.Bool("report", false, "Reports the incoming/outgoing message rate in msg/sec if set.") size = flag.Int("size", 10, "Batch size for pull_messages and publish_messages subcommands.") ) const ( usage = `Available arguments are: create_topic delete_topic create_subscription delete_subscription publish pull_messages publish_messages ` tick = 1 * time.Second ) func usageAndExit(msg string) { fmt.Fprintln(os.Stderr, msg) fmt.Println("Flags:") flag.PrintDefaults() fmt.Fprint(os.Stderr, usage) os.Exit(2) } // Check the length of the arguments. func checkArgs(argv []string, min int) { if len(argv) < min { usageAndExit("Missing arguments") } } // newClient creates http.Client with a jwt service account when // jsonFile flag is specified, otherwise by obtaining the GCE service // account's access token. func newClient(jsonFile string) (*http.Client, error) { if jsonFile != "" { jsonKey, err := ioutil.ReadFile(jsonFile) if err != nil { return nil, err } conf, err := google.JWTConfigFromJSON(jsonKey, pubsub.ScopePubSub) if err != nil { return nil, err } return conf.Client(oauth2.NoContext), nil } if metadata.OnGCE() { c := &http.Client{ Transport: &oauth2.Transport{ Source: google.ComputeTokenSource(""), }, } if *projID == "" { projectID, err := metadata.ProjectID() if err != nil { return nil, fmt.Errorf("ProjectID failed, %v", err) } *projID = projectID } return c, nil } return nil, errors.New("Could not create an authenticated client.") } func listTopics(ctx context.Context, argv []string) { panic("listTopics not implemented yet") } func createTopic(ctx context.Context, argv []string) { checkArgs(argv, 2) topic := argv[1] err := pubsub.CreateTopic(ctx, topic) if err != nil { log.Fatalf("CreateTopic failed, %v", err) } fmt.Printf("Topic %s was created.\n", topic) } func deleteTopic(ctx context.Context, argv []string) { checkArgs(argv, 2) topic := argv[1] err := pubsub.DeleteTopic(ctx, topic) if err != nil { log.Fatalf("DeleteTopic failed, %v", err) } fmt.Printf("Topic %s was deleted.\n", topic) } func listSubscriptions(ctx context.Context, argv []string) { panic("listSubscriptions not implemented yet") } func createSubscription(ctx context.Context, argv []string) { checkArgs(argv, 3) sub := argv[1] topic := argv[2] err := pubsub.CreateSub(ctx, sub, topic, 60*time.Second, "") if err != nil { log.Fatalf("CreateSub failed, %v", err) } fmt.Printf("Subscription %s was created.\n", sub) } func deleteSubscription(ctx context.Context, argv []string) { checkArgs(argv, 2) sub := argv[1] err := pubsub.DeleteSub(ctx, sub) if err != nil { log.Fatalf("DeleteSub failed, %v", err) } fmt.Printf("Subscription %s was deleted.\n", sub) } func publish(ctx context.Context, argv []string) { checkArgs(argv, 3) topic := argv[1] message := argv[2] msgIDs, err := pubsub.Publish(ctx, topic, &pubsub.Message{ Data: []byte(message), }) if err != nil { 
log.Fatalf("Publish failed, %v", err) } fmt.Printf("Message '%s' published to a topic %s and the message id is %s\n", message, topic, msgIDs[0]) } type reporter struct { reportTitle string lastC uint64 c uint64 result <-chan int } func (r *reporter) report() { ticker := time.NewTicker(tick) defer func() { ticker.Stop() }() for { select { case <-ticker.C: n := r.c - r.lastC r.lastC = r.c mps := n / uint64(tick/time.Second) log.Printf("%s ~%d msgs/s, total: %d", r.reportTitle, mps, r.c) case n := <-r.result: r.c += uint64(n) } } } func ack(ctx context.Context, sub string, ackID ...string) { err := pubsub.Ack(ctx, sub, ackID...) if err != nil { log.Printf("Ack failed, %v\n", err) } } func pullLoop(ctx context.Context, sub string, result chan<- int) { for { msgs, err := pubsub.PullWait(ctx, sub, *size) if err != nil { log.Printf("PullWait failed, %v\n", err) time.Sleep(5 * time.Second) continue } if len(msgs) == 0 { log.Println("Received no messages") continue } if *reportMPS { result <- len(msgs) } ackIDs := make([]string, len(msgs)) for i, msg := range msgs { if !*reportMPS { fmt.Printf("Got a message: %s\n", msg.Data) } ackIDs[i] = msg.AckID } go ack(ctx, sub, ackIDs...) } } func pullMessages(ctx context.Context, argv []string) { checkArgs(argv, 3) sub := argv[1] workers, err := strconv.Atoi(argv[2]) if err != nil { log.Fatalf("Atoi failed, %v", err) } result := make(chan int, 1024) for i := 0; i < int(workers); i++ { go pullLoop(ctx, sub, result) } if *reportMPS { r := reporter{reportTitle: "Received", result: result} r.report() } else { select {} } } func publishLoop(ctx context.Context, topic string, workerid int, result chan<- int) { var r uint64 for { msgs := make([]*pubsub.Message, *size) for i := 0; i < *size; i++ { msgs[i] = &pubsub.Message{ Data: []byte(fmt.Sprintf("Worker: %d, Round: %d, Message: %d", workerid, r, i)), } } _, err := pubsub.Publish(ctx, topic, msgs...) if err != nil { log.Printf("Publish failed, %v\n", err) return } r++ if *reportMPS { result <- *size } } } func publishMessages(ctx context.Context, argv []string) { checkArgs(argv, 3) topic := argv[1] workers, err := strconv.Atoi(argv[2]) if err != nil { log.Fatalf("Atoi failed, %v", err) } result := make(chan int, 1024) for i := 0; i < int(workers); i++ { go publishLoop(ctx, topic, i, result) } if *reportMPS { r := reporter{reportTitle: "Sent", result: result} r.report() } else { select {} } } // This example demonstrates calling the Cloud Pub/Sub API. As of 22 // Oct 2014, the Cloud Pub/Sub API is only available if you're // whitelisted. If you're interested in using it, please apply for the // Limited Preview program at the following form: // http://goo.gl/Wql9HL // // Also, before running this example, be sure to enable Cloud Pub/Sub // service on your project in Developer Console at: // https://console.developers.google.com/ // // Unless you run this sample on Compute Engine instance, please // create a new service account and download a JSON key file for it at // the developer console: https://console.developers.google.com/ // // It has the following subcommands: // // create_topic // delete_topic // create_subscription // delete_subscription // publish // pull_messages // publish_messages // // You can choose any names for topic and subscription as long as they // follow the naming rule described at: // https://cloud.google.com/pubsub/overview#names // // You can create/delete topics/subscriptions by self-explanatory // subcommands. 
// // The "publish" subcommand is for publishing a single message to a // specified Cloud Pub/Sub topic. // // The "pull_messages" subcommand is for continuously pulling messages // from a specified Cloud Pub/Sub subscription with specified number // of workers. // // The "publish_messages" subcommand is for continuously publishing // messages to a specified Cloud Pub/Sub topic with specified number // of workers. func main() { flag.Parse() argv := flag.Args() checkArgs(argv, 1) client, err := newClient(*jsonFile) if err != nil { log.Fatalf("clientAndId failed, %v", err) } if *projID == "" { usageAndExit("Please specify Project ID.") } ctx := cloud.NewContext(*projID, client) m := map[string]func(ctx context.Context, argv []string){ "create_topic": createTopic, "delete_topic": deleteTopic, "create_subscription": createSubscription, "delete_subscription": deleteSubscription, "publish": publish, "pull_messages": pullMessages, "publish_messages": publishMessages, } subcommand := argv[0] f, ok := m[subcommand] if !ok { usageAndExit(fmt.Sprintf("Function not found for %s", subcommand)) } f(ctx, argv) } ================================================ FILE: vendor/google.golang.org/cloud/examples/storage/appengine/app.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package gcsdemo is an example App Engine or Mananged VM app using the Google Cloud Storage API. package gcsdemo import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "strings" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/appengine" "google.golang.org/appengine/file" "google.golang.org/appengine/log" "google.golang.org/appengine/urlfetch" "google.golang.org/cloud" "google.golang.org/cloud/storage" ) // bucket is a local cache of the app's default bucket name. var bucket string // or: var bucket = ".appspot.com" func init() { http.HandleFunc("/", handler) } // demo struct holds information needed to run the various demo functions. type demo struct { c context.Context w http.ResponseWriter ctx context.Context // cleanUp is a list of filenames that need cleaning up at the end of the demo. cleanUp []string // failed indicates that one or more of the demo steps failed. failed bool } func (d *demo) errorf(format string, args ...interface{}) { d.failed = true log.Errorf(d.c, format, args...) } // handler is the main demo entry point that calls the GCS operations. 
func handler(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/" { http.NotFound(w, r) return } c := appengine.NewContext(r) if bucket == "" { var err error if bucket, err = file.DefaultBucketName(c); err != nil { log.Errorf(c, "failed to get default GCS bucket name: %v", err) return } } hc := &http.Client{ Transport: &oauth2.Transport{ Source: google.AppEngineTokenSource(c, storage.ScopeFullControl), Base: &urlfetch.Transport{Context: c}, }, } ctx := cloud.NewContext(appengine.AppID(c), hc) w.Header().Set("Content-Type", "text/plain; charset=utf-8") fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(c)) fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) d := &demo{ c: c, w: w, ctx: ctx, } n := "demo-testfile-go" d.createFile(n) d.readFile(n) d.copyFile(n) d.statFile(n) d.createListFiles() d.listBucket() d.listBucketDirMode() d.defaultACL() d.putDefaultACLRule() d.deleteDefaultACLRule() d.bucketACL() d.putBucketACLRule() d.deleteBucketACLRule() d.acl(n) d.putACLRule(n) d.deleteACLRule(n) d.deleteFiles() if d.failed { io.WriteString(w, "\nDemo failed.\n") } else { io.WriteString(w, "\nDemo succeeded.\n") } } // createFile creates a file in Google Cloud Storage. func (d *demo) createFile(fileName string) { fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) wc := storage.NewWriter(d.ctx, bucket, fileName) wc.ContentType = "text/plain" wc.Metadata = map[string]string{ "x-goog-meta-foo": "foo", "x-goog-meta-bar": "bar", } d.cleanUp = append(d.cleanUp, fileName) if _, err := wc.Write([]byte("abcde\n")); err != nil { d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) return } if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) return } if err := wc.Close(); err != nil { d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) return } } // readFile reads the named file in Google Cloud Storage. func (d *demo) readFile(fileName string) { io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") rc, err := storage.NewReader(d.ctx, bucket, fileName) if err != nil { d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) return } defer rc.Close() slurp, err := ioutil.ReadAll(rc) if err != nil { d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) return } fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) if len(slurp) > 1024 { fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) } else { fmt.Fprintf(d.w, "%s\n", slurp) } } // copyFile copies a file in Google Cloud Storage. 
func (d *demo) copyFile(fileName string) { copyName := fileName + "-copy" fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) obj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil) if err != nil { d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) return } d.cleanUp = append(d.cleanUp, copyName) d.dumpStats(obj) } func (d *demo) dumpStats(obj *storage.Object) { fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) fmt.Fprintf(d.w, "Size: %v, ", obj.Size) fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) if !obj.Deleted.IsZero() { fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) } fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) } // statFile reads the stats of the named file in Google Cloud Storage. func (d *demo) statFile(fileName string) { io.WriteString(d.w, "\nFile stat:\n") obj, err := storage.StatObject(d.ctx, bucket, fileName) if err != nil { d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) return } d.dumpStats(obj) } // createListFiles creates files that will be used by listBucket. func (d *demo) createListFiles() { io.WriteString(d.w, "\nCreating more files for listbucket...\n") for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { d.createFile(n) } } // listBucket lists the contents of a bucket in Google Cloud Storage. func (d *demo) listBucket() { io.WriteString(d.w, "\nListbucket result:\n") query := &storage.Query{Prefix: "foo"} for query != nil { objs, err := storage.ListObjects(d.ctx, bucket, query) if err != nil { d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) return } query = objs.Next for _, obj := range objs.Results { d.dumpStats(obj) } } } func (d *demo) listDir(name, indent string) { query := &storage.Query{Prefix: name, Delimiter: "/"} for query != nil { objs, err := storage.ListObjects(d.ctx, bucket, query) if err != nil { d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) return } query = objs.Next for _, obj := range objs.Results { fmt.Fprint(d.w, indent) d.dumpStats(obj) } for _, dir := range objs.Prefixes { fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) d.listDir(dir, indent+" ") } } } // listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. func (d *demo) listBucketDirMode() { io.WriteString(d.w, "\nListbucket directory mode result:\n") d.listDir("b", "") } // dumpDefaultACL prints out the default object ACL for this bucket. func (d *demo) dumpDefaultACL() { acl, err := storage.DefaultACL(d.ctx, bucket) if err != nil { d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) return } for _, v := range acl { fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) } } // defaultACL displays the default object ACL for this bucket. func (d *demo) defaultACL() { io.WriteString(d.w, "\nDefault object ACL:\n") d.dumpDefaultACL() } // putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. 
func (d *demo) putDefaultACLRule() { io.WriteString(d.w, "\nPut Default object ACL Rule:\n") err := storage.PutDefaultACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) if err != nil { d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) return } d.dumpDefaultACL() } // deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket. func (d *demo) deleteDefaultACLRule() { io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") err := storage.DeleteDefaultACLRule(d.ctx, bucket, "allUsers") if err != nil { d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) return } d.dumpDefaultACL() } // dumpBucketACL prints out the bucket ACL. func (d *demo) dumpBucketACL() { acl, err := storage.BucketACL(d.ctx, bucket) if err != nil { d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) return } for _, v := range acl { fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) } } // bucketACL displays the bucket ACL for this bucket. func (d *demo) bucketACL() { io.WriteString(d.w, "\nBucket ACL:\n") d.dumpBucketACL() } // putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. func (d *demo) putBucketACLRule() { io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") err := storage.PutBucketACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) if err != nil { d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) return } d.dumpBucketACL() } // deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket. func (d *demo) deleteBucketACLRule() { io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") err := storage.DeleteBucketACLRule(d.ctx, bucket, "allUsers") if err != nil { d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) return } d.dumpBucketACL() } // dumpACL prints out the ACL of the named file. func (d *demo) dumpACL(fileName string) { acl, err := storage.ACL(d.ctx, bucket, fileName) if err != nil { d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) return } for _, v := range acl { fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) } } // acl displays the ACL for the named file. func (d *demo) acl(fileName string) { fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) d.dumpACL(fileName) } // putACLRule adds the "allUsers" ACL rule for the named file. func (d *demo) putACLRule(fileName string) { fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) err := storage.PutACLRule(d.ctx, bucket, fileName, "allUsers", storage.RoleReader) if err != nil { d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err) return } d.dumpACL(fileName) } // deleteACLRule deleted the "allUsers" ACL rule for the named file. func (d *demo) deleteACLRule(fileName string) { fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) err := storage.DeleteACLRule(d.ctx, bucket, fileName, "allUsers") if err != nil { d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err) return } d.dumpACL(fileName) } // deleteFiles deletes all the temporary files from a bucket created by this demo. 
func (d *demo) deleteFiles() { io.WriteString(d.w, "\nDeleting files...\n") for _, v := range d.cleanUp { fmt.Fprintf(d.w, "Deleting file %v\n", v) if err := storage.DeleteObject(d.ctx, bucket, v); err != nil { d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) return } } } ================================================ FILE: vendor/google.golang.org/cloud/examples/storage/appengine/app.yaml ================================================ application: version: v1 runtime: go api_version: go1 handlers: - url: /.* script: _go_app ================================================ FILE: vendor/google.golang.org/cloud/examples/storage/appenginevm/app.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package gcsdemo is an example App Engine or Mananged VM app using the Google Cloud Storage API. package gcsdemo import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "strings" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/appengine" "google.golang.org/appengine/file" "google.golang.org/appengine/log" "google.golang.org/appengine/urlfetch" "google.golang.org/cloud" "google.golang.org/cloud/storage" ) // bucket is a local cache of the app's default bucket name. var bucket string // or: var bucket = ".appspot.com" func init() { http.HandleFunc("/", handler) } // demo struct holds information needed to run the various demo functions. type demo struct { c context.Context w http.ResponseWriter ctx context.Context // cleanUp is a list of filenames that need cleaning up at the end of the demo. cleanUp []string // failed indicates that one or more of the demo steps failed. failed bool } func (d *demo) errorf(format string, args ...interface{}) { d.failed = true log.Errorf(d.c, format, args...) } // handler is the main demo entry point that calls the GCS operations. 
func handler(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/" { http.NotFound(w, r) return } c := appengine.NewContext(r) if bucket == "" { var err error if bucket, err = file.DefaultBucketName(c); err != nil { log.Errorf(c, "failed to get default GCS bucket name: %v", err) return } } hc := &http.Client{ Transport: &oauth2.Transport{ Source: google.AppEngineTokenSource(c, storage.ScopeFullControl), Base: &urlfetch.Transport{Context: c}, }, } ctx := cloud.NewContext(appengine.AppID(c), hc) w.Header().Set("Content-Type", "text/plain; charset=utf-8") fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(c)) fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) d := &demo{ c: c, w: w, ctx: ctx, } n := "demo-testfile-go" d.createFile(n) d.readFile(n) d.copyFile(n) d.statFile(n) d.createListFiles() d.listBucket() d.listBucketDirMode() d.defaultACL() d.putDefaultACLRule() d.deleteDefaultACLRule() d.bucketACL() d.putBucketACLRule() d.deleteBucketACLRule() d.acl(n) d.putACLRule(n) d.deleteACLRule(n) d.deleteFiles() if d.failed { io.WriteString(w, "\nDemo failed.\n") } else { io.WriteString(w, "\nDemo succeeded.\n") } } // createFile creates a file in Google Cloud Storage. func (d *demo) createFile(fileName string) { fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) wc := storage.NewWriter(d.ctx, bucket, fileName) wc.ContentType = "text/plain" wc.Metadata = map[string]string{ "x-goog-meta-foo": "foo", "x-goog-meta-bar": "bar", } d.cleanUp = append(d.cleanUp, fileName) if _, err := wc.Write([]byte("abcde\n")); err != nil { d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) return } if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) return } if err := wc.Close(); err != nil { d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) return } } // readFile reads the named file in Google Cloud Storage. func (d *demo) readFile(fileName string) { io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") rc, err := storage.NewReader(d.ctx, bucket, fileName) if err != nil { d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) return } defer rc.Close() slurp, err := ioutil.ReadAll(rc) if err != nil { d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) return } fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) if len(slurp) > 1024 { fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) } else { fmt.Fprintf(d.w, "%s\n", slurp) } } // copyFile copies a file in Google Cloud Storage. 
func (d *demo) copyFile(fileName string) { copyName := fileName + "-copy" fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) obj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil) if err != nil { d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) return } d.cleanUp = append(d.cleanUp, copyName) d.dumpStats(obj) } func (d *demo) dumpStats(obj *storage.Object) { fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) fmt.Fprintf(d.w, "Size: %v, ", obj.Size) fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) if !obj.Deleted.IsZero() { fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) } fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) } // statFile reads the stats of the named file in Google Cloud Storage. func (d *demo) statFile(fileName string) { io.WriteString(d.w, "\nFile stat:\n") obj, err := storage.StatObject(d.ctx, bucket, fileName) if err != nil { d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) return } d.dumpStats(obj) } // createListFiles creates files that will be used by listBucket. func (d *demo) createListFiles() { io.WriteString(d.w, "\nCreating more files for listbucket...\n") for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { d.createFile(n) } } // listBucket lists the contents of a bucket in Google Cloud Storage. func (d *demo) listBucket() { io.WriteString(d.w, "\nListbucket result:\n") query := &storage.Query{Prefix: "foo"} for query != nil { objs, err := storage.ListObjects(d.ctx, bucket, query) if err != nil { d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) return } query = objs.Next for _, obj := range objs.Results { d.dumpStats(obj) } } } func (d *demo) listDir(name, indent string) { query := &storage.Query{Prefix: name, Delimiter: "/"} for query != nil { objs, err := storage.ListObjects(d.ctx, bucket, query) if err != nil { d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) return } query = objs.Next for _, obj := range objs.Results { fmt.Fprint(d.w, indent) d.dumpStats(obj) } for _, dir := range objs.Prefixes { fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) d.listDir(dir, indent+" ") } } } // listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. func (d *demo) listBucketDirMode() { io.WriteString(d.w, "\nListbucket directory mode result:\n") d.listDir("b", "") } // dumpDefaultACL prints out the default object ACL for this bucket. func (d *demo) dumpDefaultACL() { acl, err := storage.DefaultACL(d.ctx, bucket) if err != nil { d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) return } for _, v := range acl { fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) } } // defaultACL displays the default object ACL for this bucket. func (d *demo) defaultACL() { io.WriteString(d.w, "\nDefault object ACL:\n") d.dumpDefaultACL() } // putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. 
func (d *demo) putDefaultACLRule() { io.WriteString(d.w, "\nPut Default object ACL Rule:\n") err := storage.PutDefaultACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) if err != nil { d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) return } d.dumpDefaultACL() } // deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket. func (d *demo) deleteDefaultACLRule() { io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") err := storage.DeleteDefaultACLRule(d.ctx, bucket, "allUsers") if err != nil { d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) return } d.dumpDefaultACL() } // dumpBucketACL prints out the bucket ACL. func (d *demo) dumpBucketACL() { acl, err := storage.BucketACL(d.ctx, bucket) if err != nil { d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) return } for _, v := range acl { fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) } } // bucketACL displays the bucket ACL for this bucket. func (d *demo) bucketACL() { io.WriteString(d.w, "\nBucket ACL:\n") d.dumpBucketACL() } // putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. func (d *demo) putBucketACLRule() { io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") err := storage.PutBucketACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) if err != nil { d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) return } d.dumpBucketACL() } // deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket. func (d *demo) deleteBucketACLRule() { io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") err := storage.DeleteBucketACLRule(d.ctx, bucket, "allUsers") if err != nil { d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) return } d.dumpBucketACL() } // dumpACL prints out the ACL of the named file. func (d *demo) dumpACL(fileName string) { acl, err := storage.ACL(d.ctx, bucket, fileName) if err != nil { d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) return } for _, v := range acl { fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) } } // acl displays the ACL for the named file. func (d *demo) acl(fileName string) { fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) d.dumpACL(fileName) } // putACLRule adds the "allUsers" ACL rule for the named file. func (d *demo) putACLRule(fileName string) { fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) err := storage.PutACLRule(d.ctx, bucket, fileName, "allUsers", storage.RoleReader) if err != nil { d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err) return } d.dumpACL(fileName) } // deleteACLRule deleted the "allUsers" ACL rule for the named file. func (d *demo) deleteACLRule(fileName string) { fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) err := storage.DeleteACLRule(d.ctx, bucket, fileName, "allUsers") if err != nil { d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err) return } d.dumpACL(fileName) } // deleteFiles deletes all the temporary files from a bucket created by this demo. 
func (d *demo) deleteFiles() { io.WriteString(d.w, "\nDeleting files...\n") for _, v := range d.cleanUp { fmt.Fprintf(d.w, "Deleting file %v\n", v) if err := storage.DeleteObject(d.ctx, bucket, v); err != nil { d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) return } } } ================================================ FILE: vendor/google.golang.org/cloud/examples/storage/appenginevm/app.yaml ================================================ application: version: v1 runtime: go api_version: go1 vm: true manual_scaling: instances: 1 handlers: - url: /.* script: _go_app ================================================ FILE: vendor/google.golang.org/cloud/internal/cloud.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package internal provides support for the cloud packages. // // Users should not import this package directly. package internal import ( "fmt" "net/http" "sync" "golang.org/x/net/context" ) type contextKey struct{} func WithContext(parent context.Context, projID string, c *http.Client) context.Context { if c == nil { panic("nil *http.Client passed to WithContext") } if projID == "" { panic("empty project ID passed to WithContext") } return context.WithValue(parent, contextKey{}, &cloudContext{ ProjectID: projID, HTTPClient: c, }) } const userAgent = "gcloud-golang/0.1" type cloudContext struct { ProjectID string HTTPClient *http.Client mu sync.Mutex // guards svc svc map[string]interface{} // e.g. "storage" => *rawStorage.Service } // Service returns the result of the fill function if it's never been // called before for the given name (which is assumed to be an API // service name, like "datastore"). If it has already been cached, the fill // func is not run. // It's safe for concurrent use by multiple goroutines. func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { return cc(ctx).service(name, fill) } func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { c.mu.Lock() defer c.mu.Unlock() if c.svc == nil { c.svc = make(map[string]interface{}) } else if v, ok := c.svc[name]; ok { return v } v := fill(c.HTTPClient) c.svc[name] = v return v } // Transport is an http.RoundTripper that appends // Google Cloud client's user-agent to the original // request's user-agent header. type Transport struct { // Base represents the actual http.RoundTripper // the requests will be delegated to. Base http.RoundTripper } // RoundTrip appends a user-agent to the existing user-agent // header and delegates the request to the base http.RoundTripper. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { req = cloneRequest(req) ua := req.Header.Get("User-Agent") if ua == "" { ua = userAgent } else { ua = fmt.Sprintf("%s %s", ua, userAgent) } req.Header.Set("User-Agent", ua) return t.Base.RoundTrip(req) } // cloneRequest returns a clone of the provided *http.Request. 
// The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { // shallow copy of the struct r2 := new(http.Request) *r2 = *r // deep copy of the Header r2.Header = make(http.Header) for k, s := range r.Header { r2.Header[k] = s } return r2 } func ProjID(ctx context.Context) string { return cc(ctx).ProjectID } func HTTPClient(ctx context.Context) *http.Client { return cc(ctx).HTTPClient } // cc returns the internal *cloudContext (cc) state for a context.Context. // It panics if the user did it wrong. func cc(ctx context.Context) *cloudContext { if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { return c } panic("invalid context.Context type; it should be created with cloud.NewContext") } ================================================ FILE: vendor/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go ================================================ // Code generated by protoc-gen-go. // source: datastore_v1.proto // DO NOT EDIT! /* Package datastore is a generated protocol buffer package. It is generated from these files: datastore_v1.proto It has these top-level messages: PartitionId Key Value Property Entity EntityResult Query KindExpression PropertyReference PropertyExpression PropertyOrder Filter CompositeFilter PropertyFilter GqlQuery GqlQueryArg QueryResultBatch Mutation MutationResult ReadOptions LookupRequest LookupResponse RunQueryRequest RunQueryResponse BeginTransactionRequest BeginTransactionResponse RollbackRequest RollbackResponse CommitRequest CommitResponse AllocateIdsRequest AllocateIdsResponse */ package datastore import proto "github.com/golang/protobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = math.Inf // Specifies what data the 'entity' field contains. // A ResultType is either implied (for example, in LookupResponse.found it // is always FULL) or specified by context (for example, in message // QueryResultBatch, field 'entity_result_type' specifies a ResultType // for all the values in field 'entity_result'). type EntityResult_ResultType int32 const ( EntityResult_FULL EntityResult_ResultType = 1 EntityResult_PROJECTION EntityResult_ResultType = 2 // The entity may have no key. // A property value may have meaning 18. 
EntityResult_KEY_ONLY EntityResult_ResultType = 3 ) var EntityResult_ResultType_name = map[int32]string{ 1: "FULL", 2: "PROJECTION", 3: "KEY_ONLY", } var EntityResult_ResultType_value = map[string]int32{ "FULL": 1, "PROJECTION": 2, "KEY_ONLY": 3, } func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { p := new(EntityResult_ResultType) *p = x return p } func (x EntityResult_ResultType) String() string { return proto.EnumName(EntityResult_ResultType_name, int32(x)) } func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") if err != nil { return err } *x = EntityResult_ResultType(value) return nil } type PropertyExpression_AggregationFunction int32 const ( PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 ) var PropertyExpression_AggregationFunction_name = map[int32]string{ 1: "FIRST", } var PropertyExpression_AggregationFunction_value = map[string]int32{ "FIRST": 1, } func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { p := new(PropertyExpression_AggregationFunction) *p = x return p } func (x PropertyExpression_AggregationFunction) String() string { return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) } func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") if err != nil { return err } *x = PropertyExpression_AggregationFunction(value) return nil } type PropertyOrder_Direction int32 const ( PropertyOrder_ASCENDING PropertyOrder_Direction = 1 PropertyOrder_DESCENDING PropertyOrder_Direction = 2 ) var PropertyOrder_Direction_name = map[int32]string{ 1: "ASCENDING", 2: "DESCENDING", } var PropertyOrder_Direction_value = map[string]int32{ "ASCENDING": 1, "DESCENDING": 2, } func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { p := new(PropertyOrder_Direction) *p = x return p } func (x PropertyOrder_Direction) String() string { return proto.EnumName(PropertyOrder_Direction_name, int32(x)) } func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") if err != nil { return err } *x = PropertyOrder_Direction(value) return nil } type CompositeFilter_Operator int32 const ( CompositeFilter_AND CompositeFilter_Operator = 1 ) var CompositeFilter_Operator_name = map[int32]string{ 1: "AND", } var CompositeFilter_Operator_value = map[string]int32{ "AND": 1, } func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { p := new(CompositeFilter_Operator) *p = x return p } func (x CompositeFilter_Operator) String() string { return proto.EnumName(CompositeFilter_Operator_name, int32(x)) } func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") if err != nil { return err } *x = CompositeFilter_Operator(value) return nil } type PropertyFilter_Operator int32 const ( PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 PropertyFilter_EQUAL PropertyFilter_Operator = 5 PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 
11 ) var PropertyFilter_Operator_name = map[int32]string{ 1: "LESS_THAN", 2: "LESS_THAN_OR_EQUAL", 3: "GREATER_THAN", 4: "GREATER_THAN_OR_EQUAL", 5: "EQUAL", 11: "HAS_ANCESTOR", } var PropertyFilter_Operator_value = map[string]int32{ "LESS_THAN": 1, "LESS_THAN_OR_EQUAL": 2, "GREATER_THAN": 3, "GREATER_THAN_OR_EQUAL": 4, "EQUAL": 5, "HAS_ANCESTOR": 11, } func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { p := new(PropertyFilter_Operator) *p = x return p } func (x PropertyFilter_Operator) String() string { return proto.EnumName(PropertyFilter_Operator_name, int32(x)) } func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") if err != nil { return err } *x = PropertyFilter_Operator(value) return nil } // The possible values for the 'more_results' field. type QueryResultBatch_MoreResultsType int32 const ( QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 // results after the limit. QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 ) var QueryResultBatch_MoreResultsType_name = map[int32]string{ 1: "NOT_FINISHED", 2: "MORE_RESULTS_AFTER_LIMIT", 3: "NO_MORE_RESULTS", } var QueryResultBatch_MoreResultsType_value = map[string]int32{ "NOT_FINISHED": 1, "MORE_RESULTS_AFTER_LIMIT": 2, "NO_MORE_RESULTS": 3, } func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { p := new(QueryResultBatch_MoreResultsType) *p = x return p } func (x QueryResultBatch_MoreResultsType) String() string { return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) } func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") if err != nil { return err } *x = QueryResultBatch_MoreResultsType(value) return nil } type ReadOptions_ReadConsistency int32 const ( ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 ReadOptions_STRONG ReadOptions_ReadConsistency = 1 ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 ) var ReadOptions_ReadConsistency_name = map[int32]string{ 0: "DEFAULT", 1: "STRONG", 2: "EVENTUAL", } var ReadOptions_ReadConsistency_value = map[string]int32{ "DEFAULT": 0, "STRONG": 1, "EVENTUAL": 2, } func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { p := new(ReadOptions_ReadConsistency) *p = x return p } func (x ReadOptions_ReadConsistency) String() string { return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) } func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") if err != nil { return err } *x = ReadOptions_ReadConsistency(value) return nil } type BeginTransactionRequest_IsolationLevel int32 const ( BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 // conflict if their mutations conflict. For example: // Read(A),Write(B) may not conflict with Read(B),Write(A), // but Read(B),Write(B) does conflict with Read(B),Write(B). 
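// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): every
// enum above follows the same generated pattern — Enum() returns a pointer for
// use in optional proto2 fields, String() maps through the *_name table, and
// UnmarshalJSON accepts the symbolic name. Import path assumed to be the
// vendored "google.golang.org/cloud/internal/datastore".
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	pb "google.golang.org/cloud/internal/datastore"
)

func main() {
	// Enum() is the proto2 idiom for taking the address of an enum constant.
	dir := pb.PropertyOrder_DESCENDING.Enum()
	fmt.Println(dir.String()) // "DESCENDING"

	// UnmarshalJSON maps a quoted symbolic name back to its numeric value.
	var op pb.PropertyFilter_Operator
	if err := op.UnmarshalJSON([]byte(`"GREATER_THAN"`)); err != nil {
		panic(err)
	}
	fmt.Println(op == pb.PropertyFilter_GREATER_THAN) // true
}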
BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 ) var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ 0: "SNAPSHOT", 1: "SERIALIZABLE", } var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ "SNAPSHOT": 0, "SERIALIZABLE": 1, } func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { p := new(BeginTransactionRequest_IsolationLevel) *p = x return p } func (x BeginTransactionRequest_IsolationLevel) String() string { return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) } func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") if err != nil { return err } *x = BeginTransactionRequest_IsolationLevel(value) return nil } type CommitRequest_Mode int32 const ( CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 ) var CommitRequest_Mode_name = map[int32]string{ 1: "TRANSACTIONAL", 2: "NON_TRANSACTIONAL", } var CommitRequest_Mode_value = map[string]int32{ "TRANSACTIONAL": 1, "NON_TRANSACTIONAL": 2, } func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { p := new(CommitRequest_Mode) *p = x return p } func (x CommitRequest_Mode) String() string { return proto.EnumName(CommitRequest_Mode_name, int32(x)) } func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") if err != nil { return err } *x = CommitRequest_Mode(value) return nil } // An identifier for a particular subset of entities. // // Entities are partitioned into various subsets, each used by different // datasets and different namespaces within a dataset and so forth. // // All input partition IDs are normalized before use. // A partition ID is normalized as follows: // If the partition ID is unset or is set to an empty partition ID, replace it // with the context partition ID. // Otherwise, if the partition ID has no dataset ID, assign it the context // partition ID's dataset ID. // Unless otherwise documented, the context partition ID has the dataset ID set // to the context dataset ID and no other partition dimension set. // // A partition ID is empty if all of its fields are unset. // // Partition dimension: // A dimension may be unset. // A dimension's value must never be "". // A dimension's value must match [A-Za-z\d\.\-_]{1,100} // If the value of any dimension matches regex "__.*__", // the partition is reserved/read-only. // A reserved/read-only partition ID is forbidden in certain documented contexts. // // Dataset ID: // A dataset id's value must never be "". // A dataset id's value must match // ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} type PartitionId struct { // The dataset ID. DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` // The namespace. 
Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PartitionId) Reset() { *m = PartitionId{} } func (m *PartitionId) String() string { return proto.CompactTextString(m) } func (*PartitionId) ProtoMessage() {} func (m *PartitionId) GetDatasetId() string { if m != nil && m.DatasetId != nil { return *m.DatasetId } return "" } func (m *PartitionId) GetNamespace() string { if m != nil && m.Namespace != nil { return *m.Namespace } return "" } // A unique identifier for an entity. // If a key's partition id or any of its path kinds or names are // reserved/read-only, the key is reserved/read-only. // A reserved/read-only key is forbidden in certain documented contexts. type Key struct { // Entities are partitioned into subsets, currently identified by a dataset // (usually implicitly specified by the project) and namespace ID. // Queries are scoped to a single partition. PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` // The entity path. // An entity path consists of one or more elements composed of a kind and a // string or numerical identifier, which identify entities. The first // element identifies a root entity, the second element identifies // a child of the root entity, the third element a child of the // second entity, and so forth. The entities identified by all prefixes of // the path are called the element's ancestors. // An entity path is always fully complete: ALL of the entity's ancestors // are required to be in the path along with the entity identifier itself. // The only exception is that in some documented cases, the identifier in the // last path element (for the entity) itself may be omitted. A path can never // be empty. PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Key) Reset() { *m = Key{} } func (m *Key) String() string { return proto.CompactTextString(m) } func (*Key) ProtoMessage() {} func (m *Key) GetPartitionId() *PartitionId { if m != nil { return m.PartitionId } return nil } func (m *Key) GetPathElement() []*Key_PathElement { if m != nil { return m.PathElement } return nil } // A (kind, ID/name) pair used to construct a key path. // // At most one of name or ID may be set. // If either is set, the element is complete. // If neither is set, the element is incomplete. type Key_PathElement struct { // The kind of the entity. // A kind matching regex "__.*__" is reserved/read-only. // A kind must not contain more than 500 characters. // Cannot be "". Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` // The ID of the entity. // Never equal to zero. Values less than zero are discouraged and will not // be supported in the future. Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` // The name of the entity. // A name matching regex "__.*__" is reserved/read-only. // A name must not be more than 500 characters. // Cannot be "". 
Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } func (*Key_PathElement) ProtoMessage() {} func (m *Key_PathElement) GetKind() string { if m != nil && m.Kind != nil { return *m.Kind } return "" } func (m *Key_PathElement) GetId() int64 { if m != nil && m.Id != nil { return *m.Id } return 0 } func (m *Key_PathElement) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } // A message that can hold any of the supported value types and associated // metadata. // // At most one of the Value fields may be set. // If none are set the value is "null". // type Value struct { // A boolean value. BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` // An integer value. IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` // A double value. DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` // A timestamp value. TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` // A key value. KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` // A blob key value. BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` // A UTF-8 encoded string value. StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` // A blob value. BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` // An entity value. // May have no key. // May have a key with an incomplete key path. // May have a reserved/read-only key. EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` // A list value. // Cannot contain another list value. // Cannot also have a meaning and indexing set. ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` // The meaning field is reserved and should not be used. Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` // If the value should be indexed. // // The indexed property may be set for a // null value. // When indexed is true, stringValue // is limited to 500 characters and the blob value is limited to 500 bytes. // Exception: If meaning is set to 2, string_value is limited to 2038 // characters regardless of indexed. // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 // will be ignored on input (and will never be set on output). // Input values by default have indexed set to // true; however, you can explicitly set indexed to // true if you want. (An output value never has // indexed explicitly set to true.) If a value is // itself an entity, it cannot have indexed set to // true. // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
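// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): building
// a Key with an ancestor path and a couple of Value variants, using the proto2
// helper functions from github.com/golang/protobuf/proto. The import path is
// assumed to be the vendored "google.golang.org/cloud/internal/datastore".
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/cloud/internal/datastore"
)

func main() {
	// A two-element path: the first element is the root ancestor, the last
	// identifies the entity itself (here by name; each element sets at most
	// one of id or name).
	key := &pb.Key{
		PartitionId: &pb.PartitionId{Namespace: proto.String("prod")},
		PathElement: []*pb.Key_PathElement{
			{Kind: proto.String("TaskList"), Id: proto.Int64(42)},
			{Kind: proto.String("Task"), Name: proto.String("sampletask")},
		},
	}

	// At most one value field is set per Value. Indexed defaults to true, so
	// GetIndexed reports true even when the field is left unset.
	sv := &pb.Value{StringValue: proto.String("hello")}
	kv := &pb.Value{KeyValue: key, Indexed: proto.Bool(false)}

	fmt.Println(sv.GetIndexed(), kv.GetIndexed())  // true false
	fmt.Println(key.GetPathElement()[1].GetName()) // "sampletask"
}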
Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Value) Reset() { *m = Value{} } func (m *Value) String() string { return proto.CompactTextString(m) } func (*Value) ProtoMessage() {} const Default_Value_Indexed bool = true func (m *Value) GetBooleanValue() bool { if m != nil && m.BooleanValue != nil { return *m.BooleanValue } return false } func (m *Value) GetIntegerValue() int64 { if m != nil && m.IntegerValue != nil { return *m.IntegerValue } return 0 } func (m *Value) GetDoubleValue() float64 { if m != nil && m.DoubleValue != nil { return *m.DoubleValue } return 0 } func (m *Value) GetTimestampMicrosecondsValue() int64 { if m != nil && m.TimestampMicrosecondsValue != nil { return *m.TimestampMicrosecondsValue } return 0 } func (m *Value) GetKeyValue() *Key { if m != nil { return m.KeyValue } return nil } func (m *Value) GetBlobKeyValue() string { if m != nil && m.BlobKeyValue != nil { return *m.BlobKeyValue } return "" } func (m *Value) GetStringValue() string { if m != nil && m.StringValue != nil { return *m.StringValue } return "" } func (m *Value) GetBlobValue() []byte { if m != nil { return m.BlobValue } return nil } func (m *Value) GetEntityValue() *Entity { if m != nil { return m.EntityValue } return nil } func (m *Value) GetListValue() []*Value { if m != nil { return m.ListValue } return nil } func (m *Value) GetMeaning() int32 { if m != nil && m.Meaning != nil { return *m.Meaning } return 0 } func (m *Value) GetIndexed() bool { if m != nil && m.Indexed != nil { return *m.Indexed } return Default_Value_Indexed } // An entity property. type Property struct { // The name of the property. // A property name matching regex "__.*__" is reserved. // A reserved property name is forbidden in certain documented contexts. // The name must not contain more than 500 characters. // Cannot be "". Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` // The value(s) of the property. // Each value can have only one value property populated. For example, // you cannot have a values list of { value: { integerValue: 22, // stringValue: "a" } }, but you can have { value: { listValue: // [ { integerValue: 22 }, { stringValue: "a" } ] }. Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Property) Reset() { *m = Property{} } func (m *Property) String() string { return proto.CompactTextString(m) } func (*Property) ProtoMessage() {} func (m *Property) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *Property) GetValue() *Value { if m != nil { return m.Value } return nil } // An entity. // // An entity is limited to 1 megabyte when stored. That roughly // corresponds to a limit of 1 megabyte for the serialized form of this // message. type Entity struct { // The entity's key. // // An entity must have a key, unless otherwise documented (for example, // an entity in Value.entityValue may have no key). // An entity's kind is its key's path's last element's kind, // or null if it has no key. Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` // The entity's properties. // Each property's name must be unique for its entity. 
Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Entity) Reset() { *m = Entity{} } func (m *Entity) String() string { return proto.CompactTextString(m) } func (*Entity) ProtoMessage() {} func (m *Entity) GetKey() *Key { if m != nil { return m.Key } return nil } func (m *Entity) GetProperty() []*Property { if m != nil { return m.Property } return nil } // The result of fetching an entity from the datastore. type EntityResult struct { // The resulting entity. Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *EntityResult) Reset() { *m = EntityResult{} } func (m *EntityResult) String() string { return proto.CompactTextString(m) } func (*EntityResult) ProtoMessage() {} func (m *EntityResult) GetEntity() *Entity { if m != nil { return m.Entity } return nil } // A query. type Query struct { // The projection to return. If not set the entire entity is returned. Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` // The kinds to query (if empty, returns entities from all kinds). Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` // The filter to apply (optional). Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` // The order to apply to the query results (if empty, order is unspecified). Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` // The properties to group by (if empty, no grouping is applied to the // result set). GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` // A starting point for the query results. Optional. Query cursors are // returned in query result batches. StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` // An ending point for the query results. Optional. Query cursors are // returned in query result batches. EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` // The number of results to skip. Applies before limit, but after all other // constraints (optional, defaults to 0). Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` // The maximum number of results to return. Applies after all other // constraints. Optional. 
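// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): an
// Entity is a Key plus a list of named Property values; property names must be
// unique within the entity and each Property carries exactly one Value.
// newTask is a hypothetical helper, import paths as assumed above.
// ---------------------------------------------------------------------------
package main

import (
	"github.com/golang/protobuf/proto"
	pb "google.golang.org/cloud/internal/datastore"
)

// newTask wraps a title and a done flag into the generated Entity message.
func newTask(key *pb.Key, title string, done bool) *pb.Entity {
	return &pb.Entity{
		Key: key,
		Property: []*pb.Property{
			{Name: proto.String("title"), Value: &pb.Value{StringValue: proto.String(title)}},
			{Name: proto.String("done"), Value: &pb.Value{BooleanValue: proto.Bool(done)}},
		},
	}
}

func main() {
	key := &pb.Key{PathElement: []*pb.Key_PathElement{
		{Kind: proto.String("Task"), Name: proto.String("t1")},
	}}
	_ = newTask(key, "write documentation", false)
}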
Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Query) Reset() { *m = Query{} } func (m *Query) String() string { return proto.CompactTextString(m) } func (*Query) ProtoMessage() {} const Default_Query_Offset int32 = 0 func (m *Query) GetProjection() []*PropertyExpression { if m != nil { return m.Projection } return nil } func (m *Query) GetKind() []*KindExpression { if m != nil { return m.Kind } return nil } func (m *Query) GetFilter() *Filter { if m != nil { return m.Filter } return nil } func (m *Query) GetOrder() []*PropertyOrder { if m != nil { return m.Order } return nil } func (m *Query) GetGroupBy() []*PropertyReference { if m != nil { return m.GroupBy } return nil } func (m *Query) GetStartCursor() []byte { if m != nil { return m.StartCursor } return nil } func (m *Query) GetEndCursor() []byte { if m != nil { return m.EndCursor } return nil } func (m *Query) GetOffset() int32 { if m != nil && m.Offset != nil { return *m.Offset } return Default_Query_Offset } func (m *Query) GetLimit() int32 { if m != nil && m.Limit != nil { return *m.Limit } return 0 } // A representation of a kind. type KindExpression struct { // The name of the kind. Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *KindExpression) Reset() { *m = KindExpression{} } func (m *KindExpression) String() string { return proto.CompactTextString(m) } func (*KindExpression) ProtoMessage() {} func (m *KindExpression) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } // A reference to a property relative to the kind expressions. // exactly. type PropertyReference struct { // The name of the property. Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyReference) Reset() { *m = PropertyReference{} } func (m *PropertyReference) String() string { return proto.CompactTextString(m) } func (*PropertyReference) ProtoMessage() {} func (m *PropertyReference) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } // A representation of a property in a projection. type PropertyExpression struct { // The property to project. Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` // The aggregation function to apply to the property. Optional. // Can only be used when grouping by at least one property. Must // then be set on all properties in the projection that are not // being grouped by. AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=datastore.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } func (*PropertyExpression) ProtoMessage() {} func (m *PropertyExpression) GetProperty() *PropertyReference { if m != nil { return m.Property } return nil } func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { if m != nil && m.AggregationFunction != nil { return *m.AggregationFunction } return PropertyExpression_FIRST } // The desired order for a specific property. type PropertyOrder struct { // The property to order by. 
Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` // The direction to order by. Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=datastore.PropertyOrder_Direction,def=1" json:"direction,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } func (*PropertyOrder) ProtoMessage() {} const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING func (m *PropertyOrder) GetProperty() *PropertyReference { if m != nil { return m.Property } return nil } func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { if m != nil && m.Direction != nil { return *m.Direction } return Default_PropertyOrder_Direction } // A holder for any type of filter. Exactly one field should be specified. type Filter struct { // A composite filter. CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` // A filter on a property. PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Filter) Reset() { *m = Filter{} } func (m *Filter) String() string { return proto.CompactTextString(m) } func (*Filter) ProtoMessage() {} func (m *Filter) GetCompositeFilter() *CompositeFilter { if m != nil { return m.CompositeFilter } return nil } func (m *Filter) GetPropertyFilter() *PropertyFilter { if m != nil { return m.PropertyFilter } return nil } // A filter that merges the multiple other filters using the given operation. type CompositeFilter struct { // The operator for combining multiple filters. Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=datastore.CompositeFilter_Operator" json:"operator,omitempty"` // The list of filters to combine. // Must contain at least one filter. Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } func (*CompositeFilter) ProtoMessage() {} func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { if m != nil && m.Operator != nil { return *m.Operator } return CompositeFilter_AND } func (m *CompositeFilter) GetFilter() []*Filter { if m != nil { return m.Filter } return nil } // A filter on a specific property. type PropertyFilter struct { // The property to filter by. Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` // The operator to filter by. Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=datastore.PropertyFilter_Operator" json:"operator,omitempty"` // The value to compare the property to. 
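// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): a Query
// combining a kind, a property filter and a descending sort order. Exactly one
// of Filter's fields is set, per the comment above; a CompositeFilter would be
// used instead to AND several PropertyFilters together.
// ---------------------------------------------------------------------------
package main

import (
	"github.com/golang/protobuf/proto"
	pb "google.golang.org/cloud/internal/datastore"
)

func main() {
	// Roughly: WHERE done = false ORDER BY created DESC LIMIT 10.
	q := &pb.Query{
		Kind: []*pb.KindExpression{{Name: proto.String("Task")}},
		Filter: &pb.Filter{
			PropertyFilter: &pb.PropertyFilter{
				Property: &pb.PropertyReference{Name: proto.String("done")},
				Operator: pb.PropertyFilter_EQUAL.Enum(),
				Value:    &pb.Value{BooleanValue: proto.Bool(false)},
			},
		},
		Order: []*pb.PropertyOrder{{
			Property:  &pb.PropertyReference{Name: proto.String("created")},
			Direction: pb.PropertyOrder_DESCENDING.Enum(),
		}},
		Limit: proto.Int32(10),
	}
	_ = q
}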
Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } func (*PropertyFilter) ProtoMessage() {} func (m *PropertyFilter) GetProperty() *PropertyReference { if m != nil { return m.Property } return nil } func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { if m != nil && m.Operator != nil { return *m.Operator } return PropertyFilter_LESS_THAN } func (m *PropertyFilter) GetValue() *Value { if m != nil { return m.Value } return nil } // A GQL query. type GqlQuery struct { QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` // When false, the query string must not contain a literal. AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` // A named argument must set field GqlQueryArg.name. // No two named arguments may have the same name. // For each non-reserved named binding site in the query string, // there must be a named argument with that name, // but not necessarily the inverse. NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` // Numbered binding site @1 references the first numbered argument, // effectively using 1-based indexing, rather than the usual 0. // A numbered argument must NOT set field GqlQueryArg.name. // For each binding site numbered i in query_string, // there must be an ith numbered argument. // The inverse must also be true. NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GqlQuery) Reset() { *m = GqlQuery{} } func (m *GqlQuery) String() string { return proto.CompactTextString(m) } func (*GqlQuery) ProtoMessage() {} const Default_GqlQuery_AllowLiteral bool = false func (m *GqlQuery) GetQueryString() string { if m != nil && m.QueryString != nil { return *m.QueryString } return "" } func (m *GqlQuery) GetAllowLiteral() bool { if m != nil && m.AllowLiteral != nil { return *m.AllowLiteral } return Default_GqlQuery_AllowLiteral } func (m *GqlQuery) GetNameArg() []*GqlQueryArg { if m != nil { return m.NameArg } return nil } func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { if m != nil { return m.NumberArg } return nil } // A binding argument for a GQL query. // Exactly one of fields value and cursor must be set. type GqlQueryArg struct { // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". // Must not match regex "__.*__". // Must not be "". Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } func (*GqlQueryArg) ProtoMessage() {} func (m *GqlQueryArg) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *GqlQueryArg) GetValue() *Value { if m != nil { return m.Value } return nil } func (m *GqlQueryArg) GetCursor() []byte { if m != nil { return m.Cursor } return nil } // A batch of results produced by a query. type QueryResultBatch struct { // The result type for every entity in entityResults. 
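// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): a GQL
// query with one named argument (@done) and one numbered argument (@1, which
// is 1-based as described above). Each GqlQueryArg sets exactly one of value
// or cursor.
// ---------------------------------------------------------------------------
package main

import (
	"github.com/golang/protobuf/proto"
	pb "google.golang.org/cloud/internal/datastore"
)

func main() {
	gql := &pb.GqlQuery{
		QueryString:  proto.String("SELECT * FROM Task WHERE done = @done AND priority >= @1"),
		AllowLiteral: proto.Bool(false), // redundant: false is already the default
		NameArg: []*pb.GqlQueryArg{{
			Name:  proto.String("done"),
			Value: &pb.Value{BooleanValue: proto.Bool(false)},
		}},
		NumberArg: []*pb.GqlQueryArg{{
			Value: &pb.Value{IntegerValue: proto.Int64(4)},
		}},
	}
	_ = gql
}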
EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=datastore.EntityResult_ResultType" json:"entity_result_type,omitempty"` // The results for this batch. EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` // A cursor that points to the position after the last result in the batch. // May be absent. EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` // The state of the query after the current batch. MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=datastore.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` // The number of results skipped because of Query.offset. SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } func (*QueryResultBatch) ProtoMessage() {} func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { if m != nil && m.EntityResultType != nil { return *m.EntityResultType } return EntityResult_FULL } func (m *QueryResultBatch) GetEntityResult() []*EntityResult { if m != nil { return m.EntityResult } return nil } func (m *QueryResultBatch) GetEndCursor() []byte { if m != nil { return m.EndCursor } return nil } func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { if m != nil && m.MoreResults != nil { return *m.MoreResults } return QueryResultBatch_NOT_FINISHED } func (m *QueryResultBatch) GetSkippedResults() int32 { if m != nil && m.SkippedResults != nil { return *m.SkippedResults } return 0 } // A set of changes to apply. // // No entity in this message may have a reserved property name, // not even a property in an entity in a value. // No value in this message may have meaning 18, // not even a value in an entity in another value. // // If entities with duplicate keys are present, an arbitrary choice will // be made as to which is written. type Mutation struct { // Entities to upsert. // Each upserted entity's key must have a complete path and // must not be reserved/read-only. Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` // Entities to update. // Each updated entity's key must have a complete path and // must not be reserved/read-only. Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` // Entities to insert. // Each inserted entity's key must have a complete path and // must not be reserved/read-only. Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` // Insert entities with a newly allocated ID. // Each inserted entity's key must omit the final identifier in its path and // must not be reserved/read-only. InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` // Keys of entities to delete. // Each key must have a complete key path and must not be reserved/read-only. Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` // Ignore a user specified read-only period. Optional. 
Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Mutation) Reset() { *m = Mutation{} } func (m *Mutation) String() string { return proto.CompactTextString(m) } func (*Mutation) ProtoMessage() {} func (m *Mutation) GetUpsert() []*Entity { if m != nil { return m.Upsert } return nil } func (m *Mutation) GetUpdate() []*Entity { if m != nil { return m.Update } return nil } func (m *Mutation) GetInsert() []*Entity { if m != nil { return m.Insert } return nil } func (m *Mutation) GetInsertAutoId() []*Entity { if m != nil { return m.InsertAutoId } return nil } func (m *Mutation) GetDelete() []*Key { if m != nil { return m.Delete } return nil } func (m *Mutation) GetForce() bool { if m != nil && m.Force != nil { return *m.Force } return false } // The result of applying a mutation. type MutationResult struct { // Number of index writes. IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` // Keys for insertAutoId entities. One per entity from the // request, in the same order. InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MutationResult) Reset() { *m = MutationResult{} } func (m *MutationResult) String() string { return proto.CompactTextString(m) } func (*MutationResult) ProtoMessage() {} func (m *MutationResult) GetIndexUpdates() int32 { if m != nil && m.IndexUpdates != nil { return *m.IndexUpdates } return 0 } func (m *MutationResult) GetInsertAutoIdKey() []*Key { if m != nil { return m.InsertAutoIdKey } return nil } // Options shared by read requests. type ReadOptions struct { // The read consistency to use. // Cannot be set when transaction is set. // Lookup and ancestor queries default to STRONG, global queries default to // EVENTUAL and cannot be set to STRONG. ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=datastore.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` // The transaction to use. Optional. Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ReadOptions) Reset() { *m = ReadOptions{} } func (m *ReadOptions) String() string { return proto.CompactTextString(m) } func (*ReadOptions) ProtoMessage() {} const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { if m != nil && m.ReadConsistency != nil { return *m.ReadConsistency } return Default_ReadOptions_ReadConsistency } func (m *ReadOptions) GetTransaction() []byte { if m != nil { return m.Transaction } return nil } // The request for Lookup. type LookupRequest struct { // Options for this lookup request. Optional. ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` // Keys of entities to look up from the datastore. Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LookupRequest) Reset() { *m = LookupRequest{} } func (m *LookupRequest) String() string { return proto.CompactTextString(m) } func (*LookupRequest) ProtoMessage() {} func (m *LookupRequest) GetReadOptions() *ReadOptions { if m != nil { return m.ReadOptions } return nil } func (m *LookupRequest) GetKey() []*Key { if m != nil { return m.Key } return nil } // The response for Lookup. 
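// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): a
// Lookup request asking for strong consistency, and how the three result
// buckets of LookupResponse (found / missing / deferred) are typically
// inspected. No RPC is performed; inspect just walks an empty placeholder
// response.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/cloud/internal/datastore"
)

func inspect(resp *pb.LookupResponse) {
	for _, r := range resp.GetFound() {
		fmt.Println("found:", r.GetEntity().GetKey())
	}
	for _, r := range resp.GetMissing() {
		fmt.Println("missing:", r.GetEntity().GetKey())
	}
	// Deferred keys were skipped due to resource constraints and should be
	// looked up again in a follow-up request.
	fmt.Println("deferred keys to retry:", len(resp.GetDeferred()))
}

func main() {
	key := &pb.Key{PathElement: []*pb.Key_PathElement{
		{Kind: proto.String("Task"), Name: proto.String("t1")},
	}}
	req := &pb.LookupRequest{
		ReadOptions: &pb.ReadOptions{ReadConsistency: pb.ReadOptions_STRONG.Enum()},
		Key:         []*pb.Key{key},
	}
	_ = req
	inspect(&pb.LookupResponse{}) // placeholder response; no RPC is made here
}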
type LookupResponse struct { // Entities found as ResultType.FULL entities. Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` // Entities not found as ResultType.KEY_ONLY entities. Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` // A list of keys that were not looked up due to resource constraints. Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *LookupResponse) Reset() { *m = LookupResponse{} } func (m *LookupResponse) String() string { return proto.CompactTextString(m) } func (*LookupResponse) ProtoMessage() {} func (m *LookupResponse) GetFound() []*EntityResult { if m != nil { return m.Found } return nil } func (m *LookupResponse) GetMissing() []*EntityResult { if m != nil { return m.Missing } return nil } func (m *LookupResponse) GetDeferred() []*Key { if m != nil { return m.Deferred } return nil } // The request for RunQuery. type RunQueryRequest struct { // The options for this query. ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` // Entities are partitioned into subsets, identified by a dataset (usually // implicitly specified by the project) and namespace ID. Queries are scoped // to a single partition. // This partition ID is normalized with the standard default context // partition ID, but all other partition IDs in RunQueryRequest are // normalized with this partition ID as the context partition ID. PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` // The query to run. // Either this field or field gql_query must be set, but not both. Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` // The GQL query to run. // Either this field or field query must be set, but not both. GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } func (*RunQueryRequest) ProtoMessage() {} func (m *RunQueryRequest) GetReadOptions() *ReadOptions { if m != nil { return m.ReadOptions } return nil } func (m *RunQueryRequest) GetPartitionId() *PartitionId { if m != nil { return m.PartitionId } return nil } func (m *RunQueryRequest) GetQuery() *Query { if m != nil { return m.Query } return nil } func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { if m != nil { return m.GqlQuery } return nil } // The response for RunQuery. type RunQueryResponse struct { // A batch of query results (always present). Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } func (*RunQueryResponse) ProtoMessage() {} func (m *RunQueryResponse) GetBatch() *QueryResultBatch { if m != nil { return m.Batch } return nil } // The request for BeginTransaction. type BeginTransactionRequest struct { // The transaction isolation level. 
IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=datastore.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } func (*BeginTransactionRequest) ProtoMessage() {} const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { if m != nil && m.IsolationLevel != nil { return *m.IsolationLevel } return Default_BeginTransactionRequest_IsolationLevel } // The response for BeginTransaction. type BeginTransactionResponse struct { // The transaction identifier (always present). Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } func (*BeginTransactionResponse) ProtoMessage() {} func (m *BeginTransactionResponse) GetTransaction() []byte { if m != nil { return m.Transaction } return nil } // The request for Rollback. type RollbackRequest struct { // The transaction identifier, returned by a call to // beginTransaction. Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } func (*RollbackRequest) ProtoMessage() {} func (m *RollbackRequest) GetTransaction() []byte { if m != nil { return m.Transaction } return nil } // The response for Rollback. type RollbackResponse struct { XXX_unrecognized []byte `json:"-"` } func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } func (*RollbackResponse) ProtoMessage() {} // The request for Commit. type CommitRequest struct { // The transaction identifier, returned by a call to // beginTransaction. Must be set when mode is TRANSACTIONAL. Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` // The mutation to perform. Optional. Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=datastore.CommitRequest_Mode,def=1" json:"mode,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CommitRequest) Reset() { *m = CommitRequest{} } func (m *CommitRequest) String() string { return proto.CompactTextString(m) } func (*CommitRequest) ProtoMessage() {} const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL func (m *CommitRequest) GetTransaction() []byte { if m != nil { return m.Transaction } return nil } func (m *CommitRequest) GetMutation() *Mutation { if m != nil { return m.Mutation } return nil } func (m *CommitRequest) GetMode() CommitRequest_Mode { if m != nil && m.Mode != nil { return *m.Mode } return Default_CommitRequest_Mode } // The response for Commit. type CommitResponse struct { // The result of performing the mutation (if any). 
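// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): the
// message flow for a transactional commit. The opaque transaction bytes from
// BeginTransactionResponse are echoed back in CommitRequest (mode
// TRANSACTIONAL, the default) or in RollbackRequest if the work is abandoned.
// No RPC transport is shown; the messages are only constructed.
// ---------------------------------------------------------------------------
package main

import (
	"github.com/golang/protobuf/proto"
	pb "google.golang.org/cloud/internal/datastore"
)

func main() {
	begin := &pb.BeginTransactionRequest{
		IsolationLevel: pb.BeginTransactionRequest_SERIALIZABLE.Enum(),
	}
	_ = begin

	// Pretend the server answered with an opaque transaction handle.
	tx := []byte("opaque-transaction-id")

	key := &pb.Key{PathElement: []*pb.Key_PathElement{{Kind: proto.String("Task")}}}
	commit := &pb.CommitRequest{
		Transaction: tx,
		Mode:        pb.CommitRequest_TRANSACTIONAL.Enum(),
		Mutation: &pb.Mutation{
			// insert_auto_id: the final path element carries no id or name,
			// so the server allocates one and reports it in MutationResult.
			InsertAutoId: []*pb.Entity{{Key: key}},
		},
	}
	_ = commit

	// If the work is abandoned instead, the same handle goes into a rollback.
	rollback := &pb.RollbackRequest{Transaction: tx}
	_ = rollback
}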
MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CommitResponse) Reset() { *m = CommitResponse{} } func (m *CommitResponse) String() string { return proto.CompactTextString(m) } func (*CommitResponse) ProtoMessage() {} func (m *CommitResponse) GetMutationResult() *MutationResult { if m != nil { return m.MutationResult } return nil } // The request for AllocateIds. type AllocateIdsRequest struct { // A list of keys with incomplete key paths to allocate IDs for. // No key may be reserved/read-only. Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } func (*AllocateIdsRequest) ProtoMessage() {} func (m *AllocateIdsRequest) GetKey() []*Key { if m != nil { return m.Key } return nil } // The response for AllocateIds. type AllocateIdsResponse struct { // The keys specified in the request (in the same order), each with // its key path completed with a newly allocated ID. Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } func (*AllocateIdsResponse) ProtoMessage() {} func (m *AllocateIdsResponse) GetKey() []*Key { if m != nil { return m.Key } return nil } func init() { proto.RegisterEnum("datastore.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) proto.RegisterEnum("datastore.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) proto.RegisterEnum("datastore.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) proto.RegisterEnum("datastore.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) proto.RegisterEnum("datastore.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) proto.RegisterEnum("datastore.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) proto.RegisterEnum("datastore.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) proto.RegisterEnum("datastore.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) proto.RegisterEnum("datastore.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) } ================================================ FILE: vendor/google.golang.org/cloud/internal/datastore/datastore_v1.proto ================================================ // Copyright 2013 Google Inc. All Rights Reserved. // // The datastore v1 service proto definitions syntax = "proto2"; package datastore; option java_package = "com.google.api.services.datastore"; // An identifier for a particular subset of entities. // // Entities are partitioned into various subsets, each used by different // datasets and different namespaces within a dataset and so forth. // // All input partition IDs are normalized before use. // A partition ID is normalized as follows: // If the partition ID is unset or is set to an empty partition ID, replace it // with the context partition ID. 
// Otherwise, if the partition ID has no dataset ID, assign it the context // partition ID's dataset ID. // Unless otherwise documented, the context partition ID has the dataset ID set // to the context dataset ID and no other partition dimension set. // // A partition ID is empty if all of its fields are unset. // // Partition dimension: // A dimension may be unset. // A dimension's value must never be "". // A dimension's value must match [A-Za-z\d\.\-_]{1,100} // If the value of any dimension matches regex "__.*__", // the partition is reserved/read-only. // A reserved/read-only partition ID is forbidden in certain documented contexts. // // Dataset ID: // A dataset id's value must never be "". // A dataset id's value must match // ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} message PartitionId { // The dataset ID. optional string dataset_id = 3; // The namespace. optional string namespace = 4; } // A unique identifier for an entity. // If a key's partition id or any of its path kinds or names are // reserved/read-only, the key is reserved/read-only. // A reserved/read-only key is forbidden in certain documented contexts. message Key { // Entities are partitioned into subsets, currently identified by a dataset // (usually implicitly specified by the project) and namespace ID. // Queries are scoped to a single partition. optional PartitionId partition_id = 1; // A (kind, ID/name) pair used to construct a key path. // // At most one of name or ID may be set. // If either is set, the element is complete. // If neither is set, the element is incomplete. message PathElement { // The kind of the entity. // A kind matching regex "__.*__" is reserved/read-only. // A kind must not contain more than 500 characters. // Cannot be "". required string kind = 1; // The ID of the entity. // Never equal to zero. Values less than zero are discouraged and will not // be supported in the future. optional int64 id = 2; // The name of the entity. // A name matching regex "__.*__" is reserved/read-only. // A name must not be more than 500 characters. // Cannot be "". optional string name = 3; } // The entity path. // An entity path consists of one or more elements composed of a kind and a // string or numerical identifier, which identify entities. The first // element identifies a root entity, the second element identifies // a child of the root entity, the third element a child of the // second entity, and so forth. The entities identified by all prefixes of // the path are called the element's ancestors. // An entity path is always fully complete: ALL of the entity's ancestors // are required to be in the path along with the entity identifier itself. // The only exception is that in some documented cases, the identifier in the // last path element (for the entity) itself may be omitted. A path can never // be empty. repeated PathElement path_element = 2; } // A message that can hold any of the supported value types and associated // metadata. // // At most one of the Value fields may be set. // If none are set the value is "null". // message Value { // A boolean value. optional bool boolean_value = 1; // An integer value. optional int64 integer_value = 2; // A double value. optional double double_value = 3; // A timestamp value. optional int64 timestamp_microseconds_value = 4; // A key value. optional Key key_value = 5; // A blob key value. optional string blob_key_value = 16; // A UTF-8 encoded string value. optional string string_value = 17; // A blob value. 
optional bytes blob_value = 18; // An entity value. // May have no key. // May have a key with an incomplete key path. // May have a reserved/read-only key. optional Entity entity_value = 6; // A list value. // Cannot contain another list value. // Cannot also have a meaning and indexing set. repeated Value list_value = 7; // The meaning field is reserved and should not be used. optional int32 meaning = 14; // If the value should be indexed. // // The indexed property may be set for a // null value. // When indexed is true, stringValue // is limited to 500 characters and the blob value is limited to 500 bytes. // Exception: If meaning is set to 2, string_value is limited to 2038 // characters regardless of indexed. // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 // will be ignored on input (and will never be set on output). // Input values by default have indexed set to // true; however, you can explicitly set indexed to // true if you want. (An output value never has // indexed explicitly set to true.) If a value is // itself an entity, it cannot have indexed set to // true. // Exception: An entity value with meaning 9, 20 or 21 may be indexed. optional bool indexed = 15 [default = true]; } // An entity property. message Property { // The name of the property. // A property name matching regex "__.*__" is reserved. // A reserved property name is forbidden in certain documented contexts. // The name must not contain more than 500 characters. // Cannot be "". required string name = 1; // The value(s) of the property. // Each value can have only one value property populated. For example, // you cannot have a values list of { value: { integerValue: 22, // stringValue: "a" } }, but you can have { value: { listValue: // [ { integerValue: 22 }, { stringValue: "a" } ] }. required Value value = 4; } // An entity. // // An entity is limited to 1 megabyte when stored. That roughly // corresponds to a limit of 1 megabyte for the serialized form of this // message. message Entity { // The entity's key. // // An entity must have a key, unless otherwise documented (for example, // an entity in Value.entityValue may have no key). // An entity's kind is its key's path's last element's kind, // or null if it has no key. optional Key key = 1; // The entity's properties. // Each property's name must be unique for its entity. repeated Property property = 2; } // The result of fetching an entity from the datastore. message EntityResult { // Specifies what data the 'entity' field contains. // A ResultType is either implied (for example, in LookupResponse.found it // is always FULL) or specified by context (for example, in message // QueryResultBatch, field 'entity_result_type' specifies a ResultType // for all the values in field 'entity_result'). enum ResultType { FULL = 1; // The entire entity. PROJECTION = 2; // A projected subset of properties. // The entity may have no key. // A property value may have meaning 18. KEY_ONLY = 3; // Only the key. } // The resulting entity. required Entity entity = 1; } // A query. message Query { // The projection to return. If not set the entire entity is returned. repeated PropertyExpression projection = 2; // The kinds to query (if empty, returns entities from all kinds). repeated KindExpression kind = 3; // The filter to apply (optional). optional Filter filter = 4; // The order to apply to the query results (if empty, order is unspecified). 
repeated PropertyOrder order = 5; // The properties to group by (if empty, no grouping is applied to the // result set). repeated PropertyReference group_by = 6; // A starting point for the query results. Optional. Query cursors are // returned in query result batches. optional bytes /* serialized QueryCursor */ start_cursor = 7; // An ending point for the query results. Optional. Query cursors are // returned in query result batches. optional bytes /* serialized QueryCursor */ end_cursor = 8; // The number of results to skip. Applies before limit, but after all other // constraints (optional, defaults to 0). optional int32 offset = 10 [default=0]; // The maximum number of results to return. Applies after all other // constraints. Optional. optional int32 limit = 11; } // A representation of a kind. message KindExpression { // The name of the kind. required string name = 1; } // A reference to a property relative to the kind expressions. // exactly. message PropertyReference { // The name of the property. required string name = 2; } // A representation of a property in a projection. message PropertyExpression { enum AggregationFunction { FIRST = 1; } // The property to project. required PropertyReference property = 1; // The aggregation function to apply to the property. Optional. // Can only be used when grouping by at least one property. Must // then be set on all properties in the projection that are not // being grouped by. optional AggregationFunction aggregation_function = 2; } // The desired order for a specific property. message PropertyOrder { enum Direction { ASCENDING = 1; DESCENDING = 2; } // The property to order by. required PropertyReference property = 1; // The direction to order by. optional Direction direction = 2 [default=ASCENDING]; } // A holder for any type of filter. Exactly one field should be specified. message Filter { // A composite filter. optional CompositeFilter composite_filter = 1; // A filter on a property. optional PropertyFilter property_filter = 2; } // A filter that merges the multiple other filters using the given operation. message CompositeFilter { enum Operator { AND = 1; } // The operator for combining multiple filters. required Operator operator = 1; // The list of filters to combine. // Must contain at least one filter. repeated Filter filter = 2; } // A filter on a specific property. message PropertyFilter { enum Operator { LESS_THAN = 1; LESS_THAN_OR_EQUAL = 2; GREATER_THAN = 3; GREATER_THAN_OR_EQUAL = 4; EQUAL = 5; HAS_ANCESTOR = 11; } // The property to filter by. required PropertyReference property = 1; // The operator to filter by. required Operator operator = 2; // The value to compare the property to. required Value value = 3; } // A GQL query. message GqlQuery { required string query_string = 1; // When false, the query string must not contain a literal. optional bool allow_literal = 2 [default = false]; // A named argument must set field GqlQueryArg.name. // No two named arguments may have the same name. // For each non-reserved named binding site in the query string, // there must be a named argument with that name, // but not necessarily the inverse. repeated GqlQueryArg name_arg = 3; // Numbered binding site @1 references the first numbered argument, // effectively using 1-based indexing, rather than the usual 0. // A numbered argument must NOT set field GqlQueryArg.name. // For each binding site numbered i in query_string, // there must be an ith numbered argument. // The inverse must also be true. 
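// Illustration (not part of the upstream schema): for the query string
//   "SELECT * FROM Person WHERE name = @name AND age > @1"
// the request carries one name_arg whose GqlQueryArg.name is "name" and one
// number_arg (bound to @1, using 1-based numbering) whose GqlQueryArg.name is
// left unset.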
repeated GqlQueryArg number_arg = 4; } // A binding argument for a GQL query. // Exactly one of fields value and cursor must be set. message GqlQueryArg { // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". // Must not match regex "__.*__". // Must not be "". optional string name = 1; optional Value value = 2; optional bytes cursor = 3; } // A batch of results produced by a query. message QueryResultBatch { // The possible values for the 'more_results' field. enum MoreResultsType { NOT_FINISHED = 1; // There are additional batches to fetch from this query. MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more // results after the limit. NO_MORE_RESULTS = 3; // The query has been exhausted. } // The result type for every entity in entityResults. required EntityResult.ResultType entity_result_type = 1; // The results for this batch. repeated EntityResult entity_result = 2; // A cursor that points to the position after the last result in the batch. // May be absent. optional bytes /* serialized QueryCursor */ end_cursor = 4; // The state of the query after the current batch. required MoreResultsType more_results = 5; // The number of results skipped because of Query.offset. optional int32 skipped_results = 6; } // A set of changes to apply. // // No entity in this message may have a reserved property name, // not even a property in an entity in a value. // No value in this message may have meaning 18, // not even a value in an entity in another value. // // If entities with duplicate keys are present, an arbitrary choice will // be made as to which is written. message Mutation { // Entities to upsert. // Each upserted entity's key must have a complete path and // must not be reserved/read-only. repeated Entity upsert = 1; // Entities to update. // Each updated entity's key must have a complete path and // must not be reserved/read-only. repeated Entity update = 2; // Entities to insert. // Each inserted entity's key must have a complete path and // must not be reserved/read-only. repeated Entity insert = 3; // Insert entities with a newly allocated ID. // Each inserted entity's key must omit the final identifier in its path and // must not be reserved/read-only. repeated Entity insert_auto_id = 4; // Keys of entities to delete. // Each key must have a complete key path and must not be reserved/read-only. repeated Key delete = 5; // Ignore a user specified read-only period. Optional. optional bool force = 6; } // The result of applying a mutation. message MutationResult { // Number of index writes. required int32 index_updates = 1; // Keys for insertAutoId entities. One per entity from the // request, in the same order. repeated Key insert_auto_id_key = 2; } // Options shared by read requests. message ReadOptions { enum ReadConsistency { DEFAULT = 0; STRONG = 1; EVENTUAL = 2; } // The read consistency to use. // Cannot be set when transaction is set. // Lookup and ancestor queries default to STRONG, global queries default to // EVENTUAL and cannot be set to STRONG. optional ReadConsistency read_consistency = 1 [default=DEFAULT]; // The transaction to use. Optional. optional bytes /* serialized Transaction */ transaction = 2; } // The request for Lookup. message LookupRequest { // Options for this lookup request. Optional. optional ReadOptions read_options = 1; // Keys of entities to look up from the datastore. repeated Key key = 3; } // The response for Lookup. 
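// Illustration (not part of the upstream schema): a LookupRequest carrying
// read_options { read_consistency: STRONG } and several keys may come back with
// some entities in 'found', some keys in 'missing', and, under resource
// constraints, the remainder in 'deferred' for the caller to retry.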
message LookupResponse { // The order of results in these fields is undefined and has no relation to // the order of the keys in the input. // Entities found as ResultType.FULL entities. repeated EntityResult found = 1; // Entities not found as ResultType.KEY_ONLY entities. repeated EntityResult missing = 2; // A list of keys that were not looked up due to resource constraints. repeated Key deferred = 3; } // The request for RunQuery. message RunQueryRequest { // The options for this query. optional ReadOptions read_options = 1; // Entities are partitioned into subsets, identified by a dataset (usually // implicitly specified by the project) and namespace ID. Queries are scoped // to a single partition. // This partition ID is normalized with the standard default context // partition ID, but all other partition IDs in RunQueryRequest are // normalized with this partition ID as the context partition ID. optional PartitionId partition_id = 2; // The query to run. // Either this field or field gql_query must be set, but not both. optional Query query = 3; // The GQL query to run. // Either this field or field query must be set, but not both. optional GqlQuery gql_query = 7; } // The response for RunQuery. message RunQueryResponse { // A batch of query results (always present). optional QueryResultBatch batch = 1; } // The request for BeginTransaction. message BeginTransactionRequest { enum IsolationLevel { SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions // conflict if their mutations conflict. For example: // Read(A),Write(B) may not conflict with Read(B),Write(A), // but Read(B),Write(B) does conflict with Read(B),Write(B). SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent // transactions conflict if they cannot be serialized. // For example Read(A),Write(B) does conflict with // Read(B),Write(A) but Read(A) may not conflict with // Write(A). } // The transaction isolation level. optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; } // The response for BeginTransaction. message BeginTransactionResponse { // The transaction identifier (always present). optional bytes /* serialized Transaction */ transaction = 1; } // The request for Rollback. message RollbackRequest { // The transaction identifier, returned by a call to // beginTransaction. required bytes /* serialized Transaction */ transaction = 1; } // The response for Rollback. message RollbackResponse { // Empty } // The request for Commit. message CommitRequest { enum Mode { TRANSACTIONAL = 1; NON_TRANSACTIONAL = 2; } // The transaction identifier, returned by a call to // beginTransaction. Must be set when mode is TRANSACTIONAL. optional bytes /* serialized Transaction */ transaction = 1; // The mutation to perform. Optional. optional Mutation mutation = 2; // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. optional Mode mode = 5 [default=TRANSACTIONAL]; } // The response for Commit. message CommitResponse { // The result of performing the mutation (if any). optional MutationResult mutation_result = 1; } // The request for AllocateIds. message AllocateIdsRequest { // A list of keys with incomplete key paths to allocate IDs for. // No key may be reserved/read-only. repeated Key key = 1; } // The response for AllocateIds. message AllocateIdsResponse { // The keys specified in the request (in the same order), each with // its key path completed with a newly allocated ID. 
repeated Key key = 1; } // Each rpc normalizes the partition IDs of the keys in its input entities, // and always returns entities with keys with normalized partition IDs. // (Note that applies to all entities, including entities in values.) service DatastoreService { // Look up some entities by key. rpc Lookup(LookupRequest) returns (LookupResponse) { }; // Query for entities. rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) { }; // Begin a new transaction. rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { }; // Commit a transaction, optionally creating, deleting or modifying some // entities. rpc Commit(CommitRequest) returns (CommitResponse) { }; // Roll back a transaction. rpc Rollback(RollbackRequest) returns (RollbackResponse) { }; // Allocate IDs for incomplete keys (useful for referencing an entity before // it is inserted). rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) { }; } ================================================ FILE: vendor/google.golang.org/cloud/internal/testutil/context.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package testutil contains helper functions for writing tests. package testutil import ( "io/ioutil" "log" "net/http" "os" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/cloud" ) const ( envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" ) func Context(scopes ...string) context.Context { key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID) if key == "" || projID == "" { log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.") } jsonKey, err := ioutil.ReadFile(key) if err != nil { log.Fatalf("Cannot read the JSON key file, err: %v", err) } conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) if err != nil { log.Fatal(err) } return cloud.NewContext(projID, conf.Client(oauth2.NoContext)) } func NoAuthContext() context.Context { projID := os.Getenv(envProjID) if projID == "" { log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.") } return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport}) } ================================================ FILE: vendor/google.golang.org/cloud/option.go ================================================ /* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package cloud import ( "errors" "fmt" "net/http" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) type dialOpt struct { endpoint string scopes []string tokenSource oauth2.TokenSource httpClient *http.Client grpcClient *grpc.ClientConn } // ClientOption is used when construct clients for each cloud service. type ClientOption interface { resolve(*dialOpt) } // WithTokenSource returns a ClientOption that specifies an OAuth2 token // source to be used as the basis for authentication. func WithTokenSource(s oauth2.TokenSource) ClientOption { return withTokenSource{s} } type withTokenSource struct{ ts oauth2.TokenSource } func (w withTokenSource) resolve(o *dialOpt) { o.tokenSource = w.ts } // WithEndpoint returns a ClientOption that overrides the default endpoint // to be used for a service. func WithEndpoint(url string) ClientOption { return withEndpoint(url) } type withEndpoint string func (w withEndpoint) resolve(o *dialOpt) { o.endpoint = string(w) } // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. func WithScopes(scope ...string) ClientOption { return withScopes(scope) } type withScopes []string func (w withScopes) resolve(o *dialOpt) { o.scopes = []string(w) } // WithBaseHTTP returns a ClientOption that specifies the HTTP client to // use as the basis of communications. This option may only be used with // services that support HTTP as their communication transport. func WithBaseHTTP(client *http.Client) ClientOption { return withBaseHTTP{client} } type withBaseHTTP struct{ client *http.Client } func (w withBaseHTTP) resolve(o *dialOpt) { o.httpClient = w.client } // WithBaseGRPC returns a ClientOption that specifies the GRPC client // connection to use as the basis of communications. This option many only be // used with services that support HRPC as their communication transport. func WithBaseGRPC(client *grpc.ClientConn) ClientOption { return withBaseGRPC{client} } type withBaseGRPC struct{ client *grpc.ClientConn } func (w withBaseGRPC) resolve(o *dialOpt) { o.grpcClient = w.client } // DialHTTP returns an HTTP client for use communicating with a Google cloud // service, configured with the given ClientOptions. Most developers should // call the relevant NewClient method for the target service rather than // invoking DialHTTP directly. func DialHTTP(ctx context.Context, opt ...ClientOption) (*http.Client, error) { var o dialOpt for _, opt := range opt { opt.resolve(&o) } if o.grpcClient != nil { return nil, errors.New("unsupported GRPC base transport specified") } // TODO(djd): Wrap all http.Client's with appropriate internal version to add // UserAgent header and prepend correct endpoint. if o.httpClient != nil { return o.httpClient, nil } if o.tokenSource == nil { var err error o.tokenSource, err = google.DefaultTokenSource(ctx, o.scopes...) if err != nil { return nil, fmt.Errorf("google.DefaultTokenSource: %v", err) } } return oauth2.NewClient(ctx, o.tokenSource), nil } // DialGRPC returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. Most developers should // call the relevant NewClient method for the target service rather than // invoking DialGRPC directly. 
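// Illustrative sketch (not part of the upstream file): a caller that only needs a
// plain *http.Client for a Cloud API could combine the options above. The scope
// value is an example; real callers pass whatever scopes their service requires.
func exampleDialHTTP(ctx context.Context) (*http.Client, error) {
	// DialHTTP resolves the options and falls back to Application Default
	// Credentials when no token source is supplied.
	return DialHTTP(ctx,
		WithScopes("https://www.googleapis.com/auth/devstorage.read_only"),
	)
}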
func DialGRPC(ctx context.Context, opt ...ClientOption) (*grpc.ClientConn, error) { var o dialOpt for _, opt := range opt { opt.resolve(&o) } if o.httpClient != nil { return nil, errors.New("unsupported HTTP base transport specified") } if o.grpcClient != nil { return o.grpcClient, nil } if o.tokenSource == nil { var err error o.tokenSource, err = google.DefaultTokenSource(ctx, o.scopes...) if err != nil { return nil, fmt.Errorf("google.DefaultTokenSource: %v", err) } } grpcOpts := []grpc.DialOption{ grpc.WithPerRPCCredentials(credentials.TokenSource{o.tokenSource}), grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), } return grpc.Dial(o.endpoint, grpcOpts...) } ================================================ FILE: vendor/google.golang.org/cloud/pubsub/pubsub.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package pubsub contains a Google Cloud Pub/Sub client. // // This package is experimental and may make backwards-incompatible changes. // // More information about Google Cloud Pub/Sub is available at // https://cloud.google.com/pubsub/docs package pubsub import ( "encoding/base64" "errors" "fmt" "net/http" "time" "google.golang.org/cloud/internal" "golang.org/x/net/context" "google.golang.org/api/googleapi" raw "google.golang.org/api/pubsub/v1beta2" ) const ( // ScopePubSub grants permissions to view and manage Pub/Sub // topics and subscriptions. ScopePubSub = "https://www.googleapis.com/auth/pubsub" // ScopeCloudPlatform grants permissions to view and manage your data // across Google Cloud Platform services. ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" ) // batchLimit is maximun size of a single batch. const batchLimit = 1000 // Message represents a Pub/Sub message. type Message struct { // ID identifies this message. ID string // AckID is the identifier to acknowledge this message. AckID string // Data is the actual data in the message. Data []byte // Attributes represents the key-value pairs the current message // is labelled with. Attributes map[string]string } // TODO(jbd): Add subscription and topic listing. // CreateSub creates a Pub/Sub subscription on the backend. // A subscription should subscribe to an existing topic. // // The messages that haven't acknowledged will be pushed back to the // subscription again when the default acknowledgement deadline is // reached. You can override the default deadline by providing a // non-zero deadline. Deadline must not be specified to // precision greater than one second. // // As new messages are being queued on the subscription, you // may recieve push notifications regarding to the new arrivals. // To receive notifications of new messages in the queue, // specify an endpoint callback URL. // If endpoint is an empty string the backend will not notify the // client of new messages. // // If the subscription already exists an error will be returned. 
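// Illustrative sketch (not part of the upstream file): creating a pull
// subscription named "ocr-results" on topic "uploads" with a 30-second ack
// deadline and no push endpoint. All names here are placeholders.
func exampleCreateSub(ctx context.Context) error {
	// An empty endpoint string means the backend will not push notifications;
	// messages must be pulled instead.
	return CreateSub(ctx, "ocr-results", "uploads", 30*time.Second, "")
}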
func CreateSub(ctx context.Context, name string, topic string, deadline time.Duration, endpoint string) error { sub := &raw.Subscription{ Topic: fullTopicName(internal.ProjID(ctx), topic), } if int64(deadline) > 0 { if !isSec(deadline) { return errors.New("pubsub: deadline must not be specified to precision greater than one second") } sub.AckDeadlineSeconds = int64(deadline / time.Second) } if endpoint != "" { sub.PushConfig = &raw.PushConfig{PushEndpoint: endpoint} } _, err := rawService(ctx).Projects.Subscriptions.Create(fullSubName(internal.ProjID(ctx), name), sub).Do() return err } // DeleteSub deletes the subscription. func DeleteSub(ctx context.Context, name string) error { _, err := rawService(ctx).Projects.Subscriptions.Delete(fullSubName(internal.ProjID(ctx), name)).Do() return err } // ModifyAckDeadline modifies the acknowledgement deadline // for the messages retrieved from the specified subscription. // Deadline must not be specified to precision greater than one second. func ModifyAckDeadline(ctx context.Context, sub string, id string, deadline time.Duration) error { if !isSec(deadline) { return errors.New("pubsub: deadline must not be specified to precision greater than one second") } _, err := rawService(ctx).Projects.Subscriptions.ModifyAckDeadline(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyAckDeadlineRequest{ AckDeadlineSeconds: int64(deadline / time.Second), AckId: id, }).Do() return err } // ModifyPushEndpoint modifies the URL endpoint to modify the resource // to handle push notifications coming from the Pub/Sub backend // for the specified subscription. func ModifyPushEndpoint(ctx context.Context, sub, endpoint string) error { _, err := rawService(ctx).Projects.Subscriptions.ModifyPushConfig(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyPushConfigRequest{ PushConfig: &raw.PushConfig{ PushEndpoint: endpoint, }, }).Do() return err } // SubExists returns true if subscription exists. func SubExists(ctx context.Context, name string) (bool, error) { _, err := rawService(ctx).Projects.Subscriptions.Get(fullSubName(internal.ProjID(ctx), name)).Do() if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return false, nil } if err != nil { return false, err } return true, nil } // Ack acknowledges one or more Pub/Sub messages on the // specified subscription. func Ack(ctx context.Context, sub string, id ...string) error { for idx, ackID := range id { if ackID == "" { return fmt.Errorf("pubsub: empty ackID detected at index %d", idx) } } _, err := rawService(ctx).Projects.Subscriptions.Acknowledge(fullSubName(internal.ProjID(ctx), sub), &raw.AcknowledgeRequest{ AckIds: id, }).Do() return err } func toMessage(resp *raw.ReceivedMessage) (*Message, error) { if resp.Message == nil { return &Message{AckID: resp.AckId}, nil } data, err := base64.StdEncoding.DecodeString(resp.Message.Data) if err != nil { return nil, err } return &Message{ AckID: resp.AckId, Data: data, Attributes: resp.Message.Attributes, ID: resp.Message.MessageId, }, nil } // Pull pulls messages from the subscription. It returns up to n // number of messages, and n could not be larger than 100. func Pull(ctx context.Context, sub string, n int) ([]*Message, error) { return pull(ctx, sub, n, true) } // PullWait pulls messages from the subscription. If there are not // enough messages left in the subscription queue, it will block until // at least n number of messages arrive or timeout occurs, and n could // not be larger than 100. 
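// Illustrative sketch (not part of the upstream file): pulling up to ten messages
// from a subscription and acknowledging each one so it is not redelivered after
// the ack deadline expires.
func examplePullAndAck(ctx context.Context, sub string) error {
	msgs, err := Pull(ctx, sub, 10)
	if err != nil {
		return err
	}
	for _, m := range msgs {
		// Process m.Data here before acknowledging.
		if err := Ack(ctx, sub, m.AckID); err != nil {
			return err
		}
	}
	return nil
}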
func PullWait(ctx context.Context, sub string, n int) ([]*Message, error) { return pull(ctx, sub, n, false) } func pull(ctx context.Context, sub string, n int, retImmediately bool) ([]*Message, error) { if n < 1 || n > batchLimit { return nil, fmt.Errorf("pubsub: cannot pull less than one, more than %d messages, but %d was given", batchLimit, n) } resp, err := rawService(ctx).Projects.Subscriptions.Pull(fullSubName(internal.ProjID(ctx), sub), &raw.PullRequest{ ReturnImmediately: retImmediately, MaxMessages: int64(n), }).Do() if err != nil { return nil, err } msgs := make([]*Message, len(resp.ReceivedMessages)) for i := 0; i < len(resp.ReceivedMessages); i++ { msg, err := toMessage(resp.ReceivedMessages[i]) if err != nil { return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, PullResponse: %+v", i, resp.ReceivedMessages[i]) } msgs[i] = msg } return msgs, nil } // CreateTopic creates a new topic with the specified name on the backend. // It will return an error if topic already exists. func CreateTopic(ctx context.Context, name string) error { _, err := rawService(ctx).Projects.Topics.Create(fullTopicName(internal.ProjID(ctx), name), &raw.Topic{}).Do() return err } // DeleteTopic deletes the specified topic. func DeleteTopic(ctx context.Context, name string) error { _, err := rawService(ctx).Projects.Topics.Delete(fullTopicName(internal.ProjID(ctx), name)).Do() return err } // TopicExists returns true if a topic exists with the specified name. func TopicExists(ctx context.Context, name string) (bool, error) { _, err := rawService(ctx).Projects.Topics.Get(fullTopicName(internal.ProjID(ctx), name)).Do() if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return false, nil } if err != nil { return false, err } return true, nil } // Publish publish messages to the topic's subscribers. It returns // message IDs upon success. func Publish(ctx context.Context, topic string, msgs ...*Message) ([]string, error) { var rawMsgs []*raw.PubsubMessage if len(msgs) == 0 { return nil, errors.New("pubsub: no messages to publish") } if len(msgs) > batchLimit { return nil, fmt.Errorf("pubsub: %d messages given, but maximum batch size is %d", len(msgs), batchLimit) } rawMsgs = make([]*raw.PubsubMessage, len(msgs)) for i, msg := range msgs { rawMsgs[i] = &raw.PubsubMessage{ Data: base64.StdEncoding.EncodeToString(msg.Data), Attributes: msg.Attributes, } } resp, err := rawService(ctx).Projects.Topics.Publish(fullTopicName(internal.ProjID(ctx), topic), &raw.PublishRequest{ Messages: rawMsgs, }).Do() if err != nil { return nil, err } return resp.MessageIds, nil } // fullSubName returns the fully qualified name for a subscription. // E.g. /subscriptions/project-id/subscription-name. func fullSubName(proj, name string) string { return fmt.Sprintf("projects/%s/subscriptions/%s", proj, name) } // fullTopicName returns the fully qualified name for a topic. // E.g. /topics/project-id/topic-name. func fullTopicName(proj, name string) string { return fmt.Sprintf("projects/%s/topics/%s", proj, name) } func isSec(dur time.Duration) bool { return dur%time.Second == 0 } func rawService(ctx context.Context) *raw.Service { return internal.Service(ctx, "pubsub", func(hc *http.Client) interface{} { svc, _ := raw.New(hc) return svc }).(*raw.Service) } ================================================ FILE: vendor/google.golang.org/cloud/storage/acl.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "fmt" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" ) // ACLRole is the the access permission for the entity. type ACLRole string const ( RoleOwner ACLRole = "OWNER" RoleReader ACLRole = "READER" ) // ACLEntity is an entity holding an ACL permission. // // It could be in the form of: // "user-", "user-","group-", "group-", // "domain-" and "project-team-". // // Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. type ACLEntity string const ( AllUsers ACLEntity = "allUsers" AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" ) // ACLRule represents an access control list rule entry for a Google Cloud Storage object or bucket. // A bucket is a Google Cloud Storage container whose name is globally unique and contains zero or // more objects. An object is a blob of data that is stored in a bucket. type ACLRule struct { // Entity identifies the entity holding the current rule's permissions. Entity ACLEntity // Role is the the access permission for the entity. Role ACLRole } // DefaultACL returns the default object ACL entries for the named bucket. func DefaultACL(ctx context.Context, bucket string) ([]ACLRule, error) { acls, err := rawService(ctx).DefaultObjectAccessControls.List(bucket).Do() if err != nil { return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", bucket, err) } r := make([]ACLRule, 0, len(acls.Items)) for _, v := range acls.Items { if m, ok := v.(map[string]interface{}); ok { entity, ok1 := m["entity"].(string) role, ok2 := m["role"].(string) if ok1 && ok2 { r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) } } } return r, nil } // PutDefaultACLRule saves the named default object ACL entity with the provided role for the named bucket. func PutDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error { acl := &raw.ObjectAccessControl{ Bucket: bucket, Entity: string(entity), Role: string(role), } _, err := rawService(ctx).DefaultObjectAccessControls.Update(bucket, string(entity), acl).Do() if err != nil { return fmt.Errorf("storage: error updating default ACL rule for bucket %q, entity %q: %v", bucket, entity, err) } return nil } // DeleteDefaultACLRule deletes the named default ACL entity for the named bucket. func DeleteDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity) error { err := rawService(ctx).DefaultObjectAccessControls.Delete(bucket, string(entity)).Do() if err != nil { return fmt.Errorf("storage: error deleting default ACL rule for bucket %q, entity %q: %v", bucket, entity, err) } return nil } // BucketACL returns the ACL entries for the named bucket. 
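// Illustrative sketch (not part of the upstream file): making a single object
// world-readable and then listing its ACL entries. Bucket and object names are
// placeholders.
func exampleObjectACL(ctx context.Context) ([]ACLRule, error) {
	if err := PutACLRule(ctx, "my-bucket", "photo.jpg", AllUsers, RoleReader); err != nil {
		return nil, err
	}
	return ACL(ctx, "my-bucket", "photo.jpg")
}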
func BucketACL(ctx context.Context, bucket string) ([]ACLRule, error) { acls, err := rawService(ctx).BucketAccessControls.List(bucket).Do() if err != nil { return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", bucket, err) } r := make([]ACLRule, len(acls.Items)) for i, v := range acls.Items { r[i].Entity = ACLEntity(v.Entity) r[i].Role = ACLRole(v.Role) } return r, nil } // PutBucketACLRule saves the named ACL entity with the provided role for the named bucket. func PutBucketACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error { acl := &raw.BucketAccessControl{ Bucket: bucket, Entity: string(entity), Role: string(role), } _, err := rawService(ctx).BucketAccessControls.Update(bucket, string(entity), acl).Do() if err != nil { return fmt.Errorf("storage: error updating bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) } return nil } // DeleteBucketACLRule deletes the named ACL entity for the named bucket. func DeleteBucketACLRule(ctx context.Context, bucket string, entity ACLEntity) error { err := rawService(ctx).BucketAccessControls.Delete(bucket, string(entity)).Do() if err != nil { return fmt.Errorf("storage: error deleting bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) } return nil } // ACL returns the ACL entries for the named object. func ACL(ctx context.Context, bucket, object string) ([]ACLRule, error) { acls, err := rawService(ctx).ObjectAccessControls.List(bucket, object).Do() if err != nil { return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", bucket, object, err) } r := make([]ACLRule, 0, len(acls.Items)) for _, v := range acls.Items { if m, ok := v.(map[string]interface{}); ok { entity, ok1 := m["entity"].(string) role, ok2 := m["role"].(string) if ok1 && ok2 { r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) } } } return r, nil } // PutACLRule saves the named ACL entity with the provided role for the named object. func PutACLRule(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole) error { acl := &raw.ObjectAccessControl{ Bucket: bucket, Entity: string(entity), Role: string(role), } _, err := rawService(ctx).ObjectAccessControls.Update(bucket, object, string(entity), acl).Do() if err != nil { return fmt.Errorf("storage: error updating object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) } return nil } // DeleteACLRule deletes the named ACL entity for the named object. func DeleteACLRule(ctx context.Context, bucket, object string, entity ACLEntity) error { err := rawService(ctx).ObjectAccessControls.Delete(bucket, object, string(entity)).Do() if err != nil { return fmt.Errorf("storage: error deleting object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) } return nil } ================================================ FILE: vendor/google.golang.org/cloud/storage/storage.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Package storage contains a Google Cloud Storage client. // // This package is experimental and may make backwards-incompatible changes. package storage import ( "crypto" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/pem" "errors" "fmt" "io" "net/http" "net/url" "strings" "time" "google.golang.org/cloud/internal" "golang.org/x/net/context" "google.golang.org/api/googleapi" raw "google.golang.org/api/storage/v1" ) var ( ErrBucketNotExist = errors.New("storage: bucket doesn't exist") ErrObjectNotExist = errors.New("storage: object doesn't exist") ) const ( // ScopeFullControl grants permissions to manage your // data and permissions in Google Cloud Storage. ScopeFullControl = raw.DevstorageFullControlScope // ScopeReadOnly grants permissions to // view your data in Google Cloud Storage. ScopeReadOnly = raw.DevstorageReadOnlyScope // ScopeReadWrite grants permissions to manage your // data in Google Cloud Storage. ScopeReadWrite = raw.DevstorageReadWriteScope ) // TODO(jbd): Add storage.buckets.list. // TODO(jbd): Add storage.buckets.insert. // TODO(jbd): Add storage.buckets.update. // TODO(jbd): Add storage.buckets.delete. // TODO(jbd): Add storage.objects.watch. // BucketInfo returns the metadata for the specified bucket. func BucketInfo(ctx context.Context, name string) (*Bucket, error) { resp, err := rawService(ctx).Buckets.Get(name).Projection("full").Do() if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return nil, ErrBucketNotExist } if err != nil { return nil, err } return newBucket(resp), nil } // ListObjects lists objects from the bucket. You can specify a query // to filter the results. If q is nil, no filtering is applied. func ListObjects(ctx context.Context, bucket string, q *Query) (*Objects, error) { c := rawService(ctx).Objects.List(bucket) c.Projection("full") if q != nil { c.Delimiter(q.Delimiter) c.Prefix(q.Prefix) c.Versions(q.Versions) c.PageToken(q.Cursor) if q.MaxResults > 0 { c.MaxResults(int64(q.MaxResults)) } } resp, err := c.Do() if err != nil { return nil, err } objects := &Objects{ Results: make([]*Object, len(resp.Items)), Prefixes: make([]string, len(resp.Prefixes)), } for i, item := range resp.Items { objects.Results[i] = newObject(item) } for i, prefix := range resp.Prefixes { objects.Prefixes[i] = prefix } if resp.NextPageToken != "" { next := Query{} if q != nil { // keep the other filtering // criteria if there is a query next = *q } next.Cursor = resp.NextPageToken objects.Next = &next } return objects, nil } // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. // It is typically the Google service account client email address from // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". // Required. GoogleAccessID string // PrivateKey is the Google service account private key. It is obtainable // from the Google Developers Console. // At https://console.developers.google.com/project//apiui/credential, // create a service account client ID or reuse one of your existing service account // credentials. Click on the "Generate new P12 key" to generate and download // a new private key. Once you download the P12 file, use the following command // to convert it into a PEM file. 
// // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes // // Provide the contents of the PEM file as a byte slice. // Required. PrivateKey []byte // Method is the HTTP method to be used with the signed URL. // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. // Required. Method string // Expires is the expiration time on the signed URL. It must be // a datetime in the future. // Required. Expires time.Time // ContentType is the content type header the client must provide // to use the generated signed URL. // Optional. ContentType string // Headers is a list of extention headers the client must provide // in order to use the generated signed URL. // Optional. Headers []string // MD5 is the base64 encoded MD5 checksum of the file. // If provided, the client should provide the exact value on the request // header in order to use the signed URL. // Optional. MD5 []byte } // SignedURL returns a URL for the specified object. Signed URLs allow // the users access to a restricted resource for a limited time without having a // Google account or signing in. For more information about the signed // URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { if opts == nil { return "", errors.New("storage: missing required SignedURLOptions") } if opts.GoogleAccessID == "" || opts.PrivateKey == nil { return "", errors.New("storage: missing required credentials to generate a signed URL") } if opts.Method == "" { return "", errors.New("storage: missing required method option") } if opts.Expires.IsZero() { return "", errors.New("storage: missing required expires option") } key, err := parseKey(opts.PrivateKey) if err != nil { return "", err } h := sha256.New() fmt.Fprintf(h, "%s\n", opts.Method) fmt.Fprintf(h, "%s\n", opts.MD5) fmt.Fprintf(h, "%s\n", opts.ContentType) fmt.Fprintf(h, "%d\n", opts.Expires.Unix()) fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n")) fmt.Fprintf(h, "/%s/%s", bucket, name) b, err := rsa.SignPKCS1v15( rand.Reader, key, crypto.SHA256, h.Sum(nil), ) if err != nil { return "", err } encoded := base64.StdEncoding.EncodeToString(b) u := &url.URL{ Scheme: "https", Host: "storage.googleapis.com", Path: fmt.Sprintf("/%s/%s", bucket, name), } q := u.Query() q.Set("GoogleAccessId", opts.GoogleAccessID) q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) q.Set("Signature", string(encoded)) u.RawQuery = q.Encode() return u.String(), nil } // StatObject returns meta information about the specified object. func StatObject(ctx context.Context, bucket, name string) (*Object, error) { o, err := rawService(ctx).Objects.Get(bucket, name).Projection("full").Do() if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist } if err != nil { return nil, err } return newObject(o), nil } // UpdateAttrs updates an object with the provided attributes. // All zero-value attributes are ignored. func UpdateAttrs(ctx context.Context, bucket, name string, attrs ObjectAttrs) (*Object, error) { o, err := rawService(ctx).Objects.Patch(bucket, name, attrs.toRawObject(bucket)).Projection("full").Do() if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist } if err != nil { return nil, err } return newObject(o), nil } // DeleteObject deletes the single specified object. 
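// Illustrative sketch (not part of the upstream file): generating a signed GET URL
// that expires in one hour. The service account email and PEM-encoded key are
// supplied by the caller; the bucket and object names are placeholders.
func exampleSignedURL(email string, pemKey []byte) (string, error) {
	return SignedURL("my-bucket", "photo.jpg", &SignedURLOptions{
		GoogleAccessID: email,
		PrivateKey:     pemKey,
		Method:         "GET",
		Expires:        time.Now().Add(time.Hour),
	})
}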
func DeleteObject(ctx context.Context, bucket, name string) error { return rawService(ctx).Objects.Delete(bucket, name).Do() } // CopyObject copies the source object to the destination. // The copied object's attributes are overwritten by attrs if non-nil. func CopyObject(ctx context.Context, srcBucket, srcName string, destBucket, destName string, attrs *ObjectAttrs) (*Object, error) { if srcBucket == "" || destBucket == "" { return nil, errors.New("storage: srcBucket and destBucket must both be non-empty") } if srcName == "" || destName == "" { return nil, errors.New("storage: srcName and destName must be non-empty") } var rawObject *raw.Object if attrs != nil { attrs.Name = destName if attrs.ContentType == "" { return nil, errors.New("storage: attrs.ContentType must be non-empty") } rawObject = attrs.toRawObject(destBucket) } o, err := rawService(ctx).Objects.Copy( srcBucket, srcName, destBucket, destName, rawObject).Projection("full").Do() if err != nil { return nil, err } return newObject(o), nil } // NewReader creates a new io.ReadCloser to read the contents // of the object. func NewReader(ctx context.Context, bucket, name string) (io.ReadCloser, error) { hc := internal.HTTPClient(ctx) u := &url.URL{ Scheme: "https", Host: "storage.googleapis.com", Path: fmt.Sprintf("/%s/%s", bucket, name), } res, err := hc.Get(u.String()) if err != nil { return nil, err } if res.StatusCode == http.StatusNotFound { res.Body.Close() return nil, ErrObjectNotExist } if res.StatusCode < 200 || res.StatusCode > 299 { res.Body.Close() return res.Body, fmt.Errorf("storage: can't read object %v/%v, status code: %v", bucket, name, res.Status) } return res.Body, nil } // NewWriter returns a storage Writer that writes to the GCS object // identified by the specified name. // If such an object doesn't exist, it creates one. // Attributes can be set on the object by modifying the returned Writer's // ObjectAttrs field before the first call to Write. The name parameter to this // function is ignored if the Name field of the ObjectAttrs field is set to a // non-empty string. // // It is the caller's responsibility to call Close when writing is done. // // The object is not available and any previous object with the same // name is not replaced on Cloud Storage until Close is called. func NewWriter(ctx context.Context, bucket, name string) *Writer { return &Writer{ ctx: ctx, bucket: bucket, name: name, donec: make(chan struct{}), } } func rawService(ctx context.Context) *raw.Service { return internal.Service(ctx, "storage", func(hc *http.Client) interface{} { svc, _ := raw.New(hc) return svc }).(*raw.Service) } // parseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. func parseKey(key []byte) (*rsa.PrivateKey, error) { if block, _ := pem.Decode(key); block != nil { key = block.Bytes } parsedKey, err := x509.ParsePKCS8PrivateKey(key) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, err } } parsed, ok := parsedKey.(*rsa.PrivateKey) if !ok { return nil, errors.New("oauth2: private key is invalid") } return parsed, nil } ================================================ FILE: vendor/google.golang.org/cloud/storage/types.go ================================================ // Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "encoding/base64" "io" "sync" "time" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" ) // Bucket represents a Google Cloud Storage bucket. type Bucket struct { // Name is the name of the bucket. Name string // ACL is the list of access control rules on the bucket. ACL []ACLRule // DefaultObjectACL is the list of access controls to // apply to new objects when no object ACL is provided. DefaultObjectACL []ACLRule // Location is the location of the bucket. It defaults to "US". Location string // Metageneration is the metadata generation of the bucket. // Read-only. Metageneration int64 // StorageClass is the storage class of the bucket. This defines // how objects in the bucket are stored and determines the SLA // and the cost of storage. Typical values are "STANDARD" and // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". StorageClass string // Created is the creation time of the bucket. // Read-only. Created time.Time } func newBucket(b *raw.Bucket) *Bucket { if b == nil { return nil } bucket := &Bucket{ Name: b.Name, Location: b.Location, Metageneration: b.Metageneration, StorageClass: b.StorageClass, Created: convertTime(b.TimeCreated), } acl := make([]ACLRule, len(b.Acl)) for i, rule := range b.Acl { acl[i] = ACLRule{ Entity: ACLEntity(rule.Entity), Role: ACLRole(rule.Role), } } bucket.ACL = acl objACL := make([]ACLRule, len(b.DefaultObjectAcl)) for i, rule := range b.DefaultObjectAcl { objACL[i] = ACLRule{ Entity: ACLEntity(rule.Entity), Role: ACLRole(rule.Role), } } bucket.DefaultObjectACL = objACL return bucket } // ObjectAttrs is the user-editable object attributes. type ObjectAttrs struct { // Name is the name of the object. Name string // ContentType is the MIME type of the object's content. // Optional. ContentType string // ContentLanguage is the optional RFC 1766 Content-Language of // the object's content sent in response headers. ContentLanguage string // ContentEncoding is the optional Content-Encoding of the object // sent it the response headers. ContentEncoding string // CacheControl is the optional Cache-Control header of the object // sent in the response headers. CacheControl string // ACL is the list of access control rules for the object. // Optional. If nil or empty, existing ACL rules are preserved. ACL []ACLRule // Metadata represents user-provided metadata, in key/value pairs. // It can be nil if the current metadata values needs to preserved. 
Metadata map[string]string } func (o ObjectAttrs) toRawObject(bucket string) *raw.Object { var acl []*raw.ObjectAccessControl if len(o.ACL) > 0 { acl = make([]*raw.ObjectAccessControl, len(o.ACL)) for i, rule := range o.ACL { acl[i] = &raw.ObjectAccessControl{ Entity: string(rule.Entity), Role: string(rule.Role), } } } return &raw.Object{ Bucket: bucket, Name: o.Name, ContentType: o.ContentType, ContentEncoding: o.ContentEncoding, ContentLanguage: o.ContentLanguage, CacheControl: o.CacheControl, Acl: acl, Metadata: o.Metadata, } } // Object represents a Google Cloud Storage (GCS) object. type Object struct { // Bucket is the name of the bucket containing this GCS object. Bucket string // Name is the name of the object within the bucket. Name string // ContentType is the MIME type of the object's content. ContentType string // ContentLanguage is the content language of the object's content. ContentLanguage string // CacheControl is the Cache-Control header to be sent in the response // headers when serving the object data. CacheControl string // ACL is the list of access control rules for the object. ACL []ACLRule // Owner is the owner of the object. // // If non-zero, it is in the form of "user-". Owner string // Size is the length of the object's content. Size int64 // ContentEncoding is the encoding of the object's content. ContentEncoding string // MD5 is the MD5 hash of the object's content. MD5 []byte // CRC32C is the CRC32 checksum of the object's content using // the Castagnoli93 polynomial. CRC32C uint32 // MediaLink is an URL to the object's content. MediaLink string // Metadata represents user-provided metadata, in key/value pairs. // It can be nil if no metadata is provided. Metadata map[string]string // Generation is the generation number of the object's content. Generation int64 // MetaGeneration is the version of the metadata for this // object at this generation. This field is used for preconditions // and for detecting changes in metadata. A metageneration number // is only meaningful in the context of a particular generation // of a particular object. MetaGeneration int64 // StorageClass is the storage class of the bucket. // This value defines how objects in the bucket are stored and // determines the SLA and the cost of storage. Typical values are // "STANDARD" and "DURABLE_REDUCED_AVAILABILITY". // It defaults to "STANDARD". StorageClass string // Deleted is the time the object was deleted. // If not deleted, it is the zero value. Deleted time.Time // Updated is the creation or modification time of the object. // For buckets with versioning enabled, changing an object's // metadata does not change this property. Updated time.Time } // convertTime converts a time in RFC3339 format to time.Time. // If any error occurs in parsing, the zero-value time.Time is silently returned. 
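// Illustrative sketch (not part of the upstream file): writing a small object and
// handing back a reader for it. Attributes must be set on the Writer before the
// first Write, and the object only becomes visible once Close returns nil. Bucket
// and object names are placeholders.
func exampleWriteThenRead(ctx context.Context, data []byte) (io.ReadCloser, error) {
	w := NewWriter(ctx, "my-bucket", "notes.txt")
	w.ContentType = "text/plain"
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	// The caller is responsible for closing the returned reader.
	return NewReader(ctx, "my-bucket", "notes.txt")
}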
func convertTime(t string) time.Time { var r time.Time if t != "" { r, _ = time.Parse(time.RFC3339, t) } return r } func newObject(o *raw.Object) *Object { if o == nil { return nil } acl := make([]ACLRule, len(o.Acl)) for i, rule := range o.Acl { acl[i] = ACLRule{ Entity: ACLEntity(rule.Entity), Role: ACLRole(rule.Role), } } owner := "" if o.Owner != nil { owner = o.Owner.Entity } md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) var crc32c uint32 d, err := base64.StdEncoding.DecodeString(o.Crc32c) if err == nil && len(d) == 4 { crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]) } return &Object{ Bucket: o.Bucket, Name: o.Name, ContentType: o.ContentType, ContentLanguage: o.ContentLanguage, CacheControl: o.CacheControl, ACL: acl, Owner: owner, ContentEncoding: o.ContentEncoding, Size: int64(o.Size), MD5: md5, CRC32C: crc32c, MediaLink: o.MediaLink, Metadata: o.Metadata, Generation: o.Generation, MetaGeneration: o.Metageneration, StorageClass: o.StorageClass, Deleted: convertTime(o.TimeDeleted), Updated: convertTime(o.Updated), } } // Query represents a query to filter objects from a bucket. type Query struct { // Delimiter returns results in a directory-like fashion. // Results will contain only objects whose names, aside from the // prefix, do not contain delimiter. Objects whose names, // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. // Duplicate prefixes are omitted. // Optional. Delimiter string // Prefix is the prefix filter to query objects // whose names begin with this prefix. // Optional. Prefix string // Versions indicates whether multiple versions of the same // object will be included in the results. Versions bool // Cursor is a previously-returned page token // representing part of the larger set of results to view. // Optional. Cursor string // MaxResults is the maximum number of items plus prefixes // to return. As duplicate prefixes are omitted, // fewer total results may be returned than requested. // The default page limit is used if it is negative or zero. MaxResults int } // Objects represents a list of objects returned from // a bucket look-p request and a query to retrieve more // objects from the next pages. type Objects struct { // Results represent a list of object results. Results []*Object // Next is the continuation query to retrieve more // results with the same filtering criteria. If there // are no more results to retrieve, it is nil. Next *Query // Prefixes represents prefixes of objects // matching-but-not-listed up to and including // the requested delimiter. Prefixes []string } // contentTyper implements ContentTyper to enable an // io.ReadCloser to specify its MIME type. type contentTyper struct { io.Reader t string } func (c *contentTyper) ContentType() string { return c.t } // A Writer writes a Cloud Storage object. type Writer struct { // ObjectAttrs are optional attributes to set on the object. Any attributes // must be initialized before the first Write call. Nil or zero-valued // attributes are ignored. ObjectAttrs ctx context.Context bucket string name string once sync.Once opened bool r io.Reader pw *io.PipeWriter donec chan struct{} // closed after err and obj are set. err error obj *Object } func (w *Writer) open() { attrs := w.ObjectAttrs // Always set the name, otherwise the backend // rejects the request and responds with an HTTP 400. 
if attrs.Name == "" { attrs.Name = w.name } pr, pw := io.Pipe() w.r = &contentTyper{pr, attrs.ContentType} w.pw = pw w.opened = true go func() { resp, err := rawService(w.ctx).Objects.Insert( w.bucket, attrs.toRawObject(w.bucket)).Media(w.r).Projection("full").Do() w.err = err if err == nil { w.obj = newObject(resp) } else { pr.CloseWithError(w.err) } close(w.donec) }() } // Write appends to w. func (w *Writer) Write(p []byte) (n int, err error) { if w.err != nil { return 0, w.err } if !w.opened { w.open() } return w.pw.Write(p) } // Close completes the write operation and flushes any buffered data. // If Close doesn't return an error, metadata about the written object // can be retrieved by calling Object. func (w *Writer) Close() error { if !w.opened { w.open() } if err := w.pw.Close(); err != nil { return err } <-w.donec return w.err } // Object returns metadata about a successfully-written object. // It's only valid to call it after Close returns nil. func (w *Writer) Object() *Object { return w.obj } ================================================ FILE: vendor/google.golang.org/grpc/.travis.yml ================================================ sudo: false language: go install: - go get -v -t -d google.golang.org/grpc/... script: - go test -v -cpu 1,4 google.golang.org/grpc/... - go test -v -race -cpu 1,4 google.golang.org/grpc/... ================================================ FILE: vendor/google.golang.org/grpc/CONTRIBUTING.md ================================================ # How to contribute We definitely welcome patches and contribution to grpc! Here is some guideline and information about how to do so. ## Getting started ### Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). ### Filing Issues When filing an issue, make sure to answer these five questions: 1. What version of Go are you using (`go version`)? 2. What operating system and processor architecture are you using? 3. What did you do? 4. What did you expect to see? 5. What did you see instead? ### Contributing code Please read the Contribution Guidelines before sending patches. We will not accept GitHub pull requests once Gerrit is setup (we will use Gerrit instead for code review). Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. ================================================ FILE: vendor/google.golang.org/grpc/LICENSE ================================================ Copyright 2014, Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/google.golang.org/grpc/PATENTS ================================================ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the GRPC project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of GRPC, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of GRPC. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of GRPC or any code incorporated within this implementation of GRPC constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of GRPC shall terminate as of the date such litigation is filed. ================================================ FILE: vendor/google.golang.org/grpc/README.md ================================================ #gRPC-Go [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) The Go implementation of [gRPC](https://github.com/grpc/grpc) Installation ------------ To install this package, you need to install Go 1.4 and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` $ go get google.golang.org/grpc ``` Documentation ------------- You can find more detailed documentation and examples in the [grpc-common repository](http://github.com/grpc/grpc-common). Status ------ Alpha - ready for early adopters. ================================================ FILE: vendor/google.golang.org/grpc/benchmark/benchmark.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. 
nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* Package benchmark implements the building blocks to setup end-to-end gRPC benchmarks. */ package benchmark import ( "io" "math" "net" "golang.org/x/net/context" "google.golang.org/grpc" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/grpclog" ) func newPayload(t testpb.PayloadType, size int) *testpb.Payload { if size < 0 { grpclog.Fatalf("Requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") default: grpclog.Fatalf("Unsupported payload type: %d", t) } return &testpb.Payload{ Type: t, Body: body, } } type testServer struct { } func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{ Payload: newPayload(in.ResponseType, int(in.ResponseSize)), }, nil } func (s *testServer) StreamingCall(stream testpb.TestService_StreamingCallServer) error { for { in, err := stream.Recv() if err == io.EOF { // read done. return nil } if err != nil { return err } if err := stream.Send(&testpb.SimpleResponse{ Payload: newPayload(in.ResponseType, int(in.ResponseSize)), }); err != nil { return err } } } // StartServer starts a gRPC server serving a benchmark service. It returns its // listen address and a function to stop the server. func StartServer() (string, func()) { lis, err := net.Listen("tcp", ":0") if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } s := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32)) testpb.RegisterTestServiceServer(s, &testServer{}) go s.Serve(lis) return lis.Addr().String(), func() { s.Stop() } } // DoUnaryCall performs an unary RPC with given stub and request and response sizes. func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, ResponseSize: int32(respSize), Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } } // DoStreamingRoundTrip performs a round trip for a single streaming rpc. 
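// Editor's note: the following sketch is illustrative and is not part of the
// vendored file. Assuming the StartServer, NewClientConn and DoUnaryCall
// helpers defined in this package (NewClientConn appears just below), a
// minimal end-to-end unary benchmark loop could look like this; the name
// exampleUnaryBenchmark is hypothetical.
func exampleUnaryBenchmark(iterations int) {
	addr, stop := StartServer() // start the in-process benchmark server
	defer stop()
	conn := NewClientConn(addr) // dial it
	defer conn.Close()
	tc := testpb.NewTestServiceClient(conn)
	for i := 0; i < iterations; i++ {
		DoUnaryCall(tc, 1, 1) // 1-byte request, 1-byte response
	}
}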
func DoStreamingRoundTrip(tc testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, ResponseSize: int32(respSize), Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) } if _, err := stream.Recv(); err != nil { grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) } } // NewClientConn creates a gRPC client connection to addr. func NewClientConn(addr string) *grpc.ClientConn { conn, err := grpc.Dial(addr) if err != nil { grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) } return conn } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/client/main.go ================================================ package main import ( "flag" "math" "net" "net/http" _ "net/http/pprof" "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/benchmark" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/grpclog" ) var ( server = flag.String("server", "", "The server address") maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs") duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client") rpcType = flag.Int("rpc_type", 0, `Configure different client rpc type. Valid options are: 0 : unary call; 1 : streaming call.`) ) func unaryCaller(client testpb.TestServiceClient) { benchmark.DoUnaryCall(client, 1, 1) } func streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) { benchmark.DoStreamingRoundTrip(client, stream, 1, 1) } func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.TestServiceClient) { s = stats.NewStats(256) conn = benchmark.NewClientConn(*server) tc = testpb.NewTestServiceClient(conn) return s, conn, tc } func closeLoopUnary() { s, conn, tc := buildConnection() for i := 0; i < 100; i++ { unaryCaller(tc) } ch := make(chan int, *maxConcurrentRPCs*4) var ( mu sync.Mutex wg sync.WaitGroup ) wg.Add(*maxConcurrentRPCs) for i := 0; i < *maxConcurrentRPCs; i++ { go func() { for _ = range ch { start := time.Now() unaryCaller(tc) elapse := time.Since(start) mu.Lock() s.Add(elapse) mu.Unlock() } wg.Done() }() } // Stop the client when time is up. done := make(chan struct{}) go func() { <-time.After(time.Duration(*duration) * time.Second) close(done) }() ok := true for ok { select { case ch <- 0: case <-done: ok = false } } close(ch) wg.Wait() conn.Close() grpclog.Println(s.String()) } func closeLoopStream() { s, conn, tc := buildConnection() stream, err := tc.StreamingCall(context.Background()) if err != nil { grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) } for i := 0; i < 100; i++ { streamCaller(tc, stream) } ch := make(chan int, *maxConcurrentRPCs*4) var ( mu sync.Mutex wg sync.WaitGroup ) wg.Add(*maxConcurrentRPCs) // Distribute RPCs over maxConcurrentRPCs workers. for i := 0; i < *maxConcurrentRPCs; i++ { go func() { for _ = range ch { start := time.Now() streamCaller(tc, stream) elapse := time.Since(start) mu.Lock() s.Add(elapse) mu.Unlock() } wg.Done() }() } // Stop the client when time is up.
done := make(chan struct{}) go func() { <-time.After(time.Duration(*duration) * time.Second) close(done) }() ok := true for ok { select { case ch <- 0: case <-done: ok = false } } close(ch) wg.Wait() conn.Close() grpclog.Println(s.String()) } func main() { flag.Parse() go func() { lis, err := net.Listen("tcp", ":0") if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } grpclog.Println("Client profiling address: ", lis.Addr().String()) if err := http.Serve(lis, nil); err != nil { grpclog.Fatalf("Failed to serve: %v", err) } }() switch *rpcType { case 0: closeLoopUnary() case 1: closeLoopStream() } } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go ================================================ // Code generated by protoc-gen-go. // source: test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: test.proto It has these top-level messages: StatsRequest ServerStats Payload HistogramData ClientConfig Mark ClientArgs ClientStats ClientStatus ServerConfig ServerArgs ServerStatus SimpleRequest SimpleResponse */ package grpc_testing import proto "github.com/golang/protobuf/proto" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal type PayloadType int32 const ( // Compressable text format. PayloadType_COMPRESSABLE PayloadType = 0 // Uncompressable binary format. PayloadType_UNCOMPRESSABLE PayloadType = 1 // Randomly chosen from all other formats defined in this enum. PayloadType_RANDOM PayloadType = 2 ) var PayloadType_name = map[int32]string{ 0: "COMPRESSABLE", 1: "UNCOMPRESSABLE", 2: "RANDOM", } var PayloadType_value = map[string]int32{ "COMPRESSABLE": 0, "UNCOMPRESSABLE": 1, "RANDOM": 2, } func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } type ClientType int32 const ( ClientType_SYNCHRONOUS_CLIENT ClientType = 0 ClientType_ASYNC_CLIENT ClientType = 1 ) var ClientType_name = map[int32]string{ 0: "SYNCHRONOUS_CLIENT", 1: "ASYNC_CLIENT", } var ClientType_value = map[string]int32{ "SYNCHRONOUS_CLIENT": 0, "ASYNC_CLIENT": 1, } func (x ClientType) String() string { return proto.EnumName(ClientType_name, int32(x)) } type ServerType int32 const ( ServerType_SYNCHRONOUS_SERVER ServerType = 0 ServerType_ASYNC_SERVER ServerType = 1 ) var ServerType_name = map[int32]string{ 0: "SYNCHRONOUS_SERVER", 1: "ASYNC_SERVER", } var ServerType_value = map[string]int32{ "SYNCHRONOUS_SERVER": 0, "ASYNC_SERVER": 1, } func (x ServerType) String() string { return proto.EnumName(ServerType_name, int32(x)) } type RpcType int32 const ( RpcType_UNARY RpcType = 0 RpcType_STREAMING RpcType = 1 ) var RpcType_name = map[int32]string{ 0: "UNARY", 1: "STREAMING", } var RpcType_value = map[string]int32{ "UNARY": 0, "STREAMING": 1, } func (x RpcType) String() string { return proto.EnumName(RpcType_name, int32(x)) } type StatsRequest struct { // run number TestNum int32 `protobuf:"varint,1,opt,name=test_num" json:"test_num,omitempty"` } func (m *StatsRequest) Reset() { *m = StatsRequest{} } func (m *StatsRequest) String() string { return proto.CompactTextString(m) } func (*StatsRequest) ProtoMessage() {} type ServerStats struct { // wall clock time TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed" 
json:"time_elapsed,omitempty"` // user time used by the server process and threads TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user" json:"time_user,omitempty"` // server time used by the server process and all threads TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system" json:"time_system,omitempty"` } func (m *ServerStats) Reset() { *m = ServerStats{} } func (m *ServerStats) String() string { return proto.CompactTextString(m) } func (*ServerStats) ProtoMessage() {} type Payload struct { // The type of data in body. Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} type HistogramData struct { Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen" json:"min_seen,omitempty"` MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen" json:"max_seen,omitempty"` Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares" json:"sum_of_squares,omitempty"` Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` } func (m *HistogramData) Reset() { *m = HistogramData{} } func (m *HistogramData) String() string { return proto.CompactTextString(m) } func (*HistogramData) ProtoMessage() {} type ClientConfig struct { ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets" json:"server_targets,omitempty"` ClientType ClientType `protobuf:"varint,2,opt,name=client_type,enum=grpc.testing.ClientType" json:"client_type,omitempty"` EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel" json:"outstanding_rpcs_per_channel,omitempty"` ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels" json:"client_channels,omitempty"` PayloadSize int32 `protobuf:"varint,6,opt,name=payload_size" json:"payload_size,omitempty"` // only for async client: AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads" json:"async_client_threads,omitempty"` RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` } func (m *ClientConfig) Reset() { *m = ClientConfig{} } func (m *ClientConfig) String() string { return proto.CompactTextString(m) } func (*ClientConfig) ProtoMessage() {} // Request current stats type Mark struct { } func (m *Mark) Reset() { *m = Mark{} } func (m *Mark) String() string { return proto.CompactTextString(m) } func (*Mark) ProtoMessage() {} type ClientArgs struct { Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` } func (m *ClientArgs) Reset() { *m = ClientArgs{} } func (m *ClientArgs) String() string { return proto.CompactTextString(m) } func (*ClientArgs) ProtoMessage() {} func (m *ClientArgs) GetSetup() *ClientConfig { if m != nil { return m.Setup } return nil } func (m *ClientArgs) GetMark() *Mark { if m != nil { return m.Mark } return nil } type ClientStats struct { Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` TimeElapsed float64 
`protobuf:"fixed64,3,opt,name=time_elapsed" json:"time_elapsed,omitempty"` TimeUser float64 `protobuf:"fixed64,4,opt,name=time_user" json:"time_user,omitempty"` TimeSystem float64 `protobuf:"fixed64,5,opt,name=time_system" json:"time_system,omitempty"` } func (m *ClientStats) Reset() { *m = ClientStats{} } func (m *ClientStats) String() string { return proto.CompactTextString(m) } func (*ClientStats) ProtoMessage() {} func (m *ClientStats) GetLatencies() *HistogramData { if m != nil { return m.Latencies } return nil } type ClientStatus struct { Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` } func (m *ClientStatus) Reset() { *m = ClientStatus{} } func (m *ClientStatus) String() string { return proto.CompactTextString(m) } func (*ClientStatus) ProtoMessage() {} func (m *ClientStatus) GetStats() *ClientStats { if m != nil { return m.Stats } return nil } type ServerConfig struct { ServerType ServerType `protobuf:"varint,1,opt,name=server_type,enum=grpc.testing.ServerType" json:"server_type,omitempty"` Threads int32 `protobuf:"varint,2,opt,name=threads" json:"threads,omitempty"` EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` } func (m *ServerConfig) Reset() { *m = ServerConfig{} } func (m *ServerConfig) String() string { return proto.CompactTextString(m) } func (*ServerConfig) ProtoMessage() {} type ServerArgs struct { Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` } func (m *ServerArgs) Reset() { *m = ServerArgs{} } func (m *ServerArgs) String() string { return proto.CompactTextString(m) } func (*ServerArgs) ProtoMessage() {} func (m *ServerArgs) GetSetup() *ServerConfig { if m != nil { return m.Setup } return nil } func (m *ServerArgs) GetMark() *Mark { if m != nil { return m.Mark } return nil } type ServerStatus struct { Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` } func (m *ServerStatus) Reset() { *m = ServerStatus{} } func (m *ServerStatus) String() string { return proto.CompactTextString(m) } func (*ServerStatus) ProtoMessage() {} func (m *ServerStatus) GetStats() *ServerStats { if m != nil { return m.Stats } return nil } type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. ResponseSize int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` // Optional input payload sent along with the request. 
Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } type SimpleResponse struct { Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } func (*SimpleResponse) ProtoMessage() {} func (m *SimpleResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) } // Client API for TestService service type TestServiceClient interface { // One request followed by one response. // The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // One request followed by one response. // The server returns the client payload as-is. StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) } type testServiceClient struct { cc *grpc.ClientConn } func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { return &testServiceClient{cc} } func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingCall", opts...) if err != nil { return nil, err } x := &testServiceStreamingCallClient{stream} return x, nil } type TestService_StreamingCallClient interface { Send(*SimpleRequest) error Recv() (*SimpleResponse, error) grpc.ClientStream } type testServiceStreamingCallClient struct { grpc.ClientStream } func (x *testServiceStreamingCallClient) Send(m *SimpleRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceStreamingCallClient) Recv() (*SimpleResponse, error) { m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for TestService service type TestServiceServer interface { // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // One request followed by one response. // The server returns the client payload as-is. 
StreamingCall(TestService_StreamingCallServer) error } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(SimpleRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(TestServiceServer).UnaryCall(ctx, in) if err != nil { return nil, err } return out, nil } func _TestService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).StreamingCall(&testServiceStreamingCallServer{stream}) } type TestService_StreamingCallServer interface { Send(*SimpleResponse) error Recv() (*SimpleRequest, error) grpc.ServerStream } type testServiceStreamingCallServer struct { grpc.ServerStream } func (x *testServiceStreamingCallServer) Send(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceStreamingCallServer) Recv() (*SimpleRequest, error) { m := new(SimpleRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "UnaryCall", Handler: _TestService_UnaryCall_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingCall", Handler: _TestService_StreamingCall_Handler, ServerStreams: true, ClientStreams: true, }, }, } // Client API for Worker service type WorkerClient interface { // Start test with specified workload RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) // Start test with specified workload RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) } type workerClient struct { cc *grpc.ClientConn } func NewWorkerClient(cc *grpc.ClientConn) WorkerClient { return &workerClient{cc} } func (c *workerClient) RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) { stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[0], c.cc, "/grpc.testing.Worker/RunTest", opts...) if err != nil { return nil, err } x := &workerRunTestClient{stream} return x, nil } type Worker_RunTestClient interface { Send(*ClientArgs) error Recv() (*ClientStatus, error) grpc.ClientStream } type workerRunTestClient struct { grpc.ClientStream } func (x *workerRunTestClient) Send(m *ClientArgs) error { return x.ClientStream.SendMsg(m) } func (x *workerRunTestClient) Recv() (*ClientStatus, error) { m := new(ClientStatus) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *workerClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) { stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[1], c.cc, "/grpc.testing.Worker/RunServer", opts...) 
if err != nil { return nil, err } x := &workerRunServerClient{stream} return x, nil } type Worker_RunServerClient interface { Send(*ServerArgs) error Recv() (*ServerStatus, error) grpc.ClientStream } type workerRunServerClient struct { grpc.ClientStream } func (x *workerRunServerClient) Send(m *ServerArgs) error { return x.ClientStream.SendMsg(m) } func (x *workerRunServerClient) Recv() (*ServerStatus, error) { m := new(ServerStatus) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for Worker service type WorkerServer interface { // Start test with specified workload RunTest(Worker_RunTestServer) error // Start test with specified workload RunServer(Worker_RunServerServer) error } func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { s.RegisterService(&_Worker_serviceDesc, srv) } func _Worker_RunTest_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(WorkerServer).RunTest(&workerRunTestServer{stream}) } type Worker_RunTestServer interface { Send(*ClientStatus) error Recv() (*ClientArgs, error) grpc.ServerStream } type workerRunTestServer struct { grpc.ServerStream } func (x *workerRunTestServer) Send(m *ClientStatus) error { return x.ServerStream.SendMsg(m) } func (x *workerRunTestServer) Recv() (*ClientArgs, error) { m := new(ClientArgs) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _Worker_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(WorkerServer).RunServer(&workerRunServerServer{stream}) } type Worker_RunServerServer interface { Send(*ServerStatus) error Recv() (*ServerArgs, error) grpc.ServerStream } type workerRunServerServer struct { grpc.ServerStream } func (x *workerRunServerServer) Send(m *ServerStatus) error { return x.ServerStream.SendMsg(m) } func (x *workerRunServerServer) Recv() (*ServerArgs, error) { m := new(ServerArgs) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _Worker_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.Worker", HandlerType: (*WorkerServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "RunTest", Handler: _Worker_RunTest_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "RunServer", Handler: _Worker_RunServer_Handler, ServerStreams: true, ClientStreams: true, }, }, } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/grpc_testing/test.proto ================================================ // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. syntax = "proto3"; package grpc.testing; enum PayloadType { // Compressable text format. COMPRESSABLE = 0; // Uncompressable binary format. UNCOMPRESSABLE = 1; // Randomly chosen from all other formats defined in this enum. RANDOM = 2; } message StatsRequest { // run number optional int32 test_num = 1; } message ServerStats { // wall clock time double time_elapsed = 1; // user time used by the server process and threads double time_user = 2; // server time used by the server process and all threads double time_system = 3; } message Payload { // The type of data in body. PayloadType type = 1; // Primary contents of payload. 
bytes body = 2; } message HistogramData { repeated uint32 bucket = 1; double min_seen = 2; double max_seen = 3; double sum = 4; double sum_of_squares = 5; double count = 6; } enum ClientType { SYNCHRONOUS_CLIENT = 0; ASYNC_CLIENT = 1; } enum ServerType { SYNCHRONOUS_SERVER = 0; ASYNC_SERVER = 1; } enum RpcType { UNARY = 0; STREAMING = 1; } message ClientConfig { repeated string server_targets = 1; ClientType client_type = 2; bool enable_ssl = 3; int32 outstanding_rpcs_per_channel = 4; int32 client_channels = 5; int32 payload_size = 6; // only for async client: int32 async_client_threads = 7; RpcType rpc_type = 8; } // Request current stats message Mark {} message ClientArgs { oneof argtype { ClientConfig setup = 1; Mark mark = 2; } } message ClientStats { HistogramData latencies = 1; double time_elapsed = 3; double time_user = 4; double time_system = 5; } message ClientStatus { ClientStats stats = 1; } message ServerConfig { ServerType server_type = 1; int32 threads = 2; bool enable_ssl = 3; } message ServerArgs { oneof argtype { ServerConfig setup = 1; Mark mark = 2; } } message ServerStatus { ServerStats stats = 1; int32 port = 2; } message SimpleRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. PayloadType response_type = 1; // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. int32 response_size = 2; // Optional input payload sent along with the request. Payload payload = 3; } message SimpleResponse { Payload payload = 1; } service TestService { // One request followed by one response. // The server returns the client payload as-is. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); // One request followed by one response. // The server returns the client payload as-is. rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); } service Worker { // Start test with specified workload rpc RunTest(stream ClientArgs) returns (stream ClientStatus); // Start test with specified workload rpc RunServer(stream ServerArgs) returns (stream ServerStatus); } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/server/main.go ================================================ package main import ( "flag" "math" "net" "net/http" _ "net/http/pprof" "time" "google.golang.org/grpc/benchmark" "google.golang.org/grpc/grpclog" ) var ( duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server") ) func main() { flag.Parse() go func() { lis, err := net.Listen("tcp", ":0") if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } grpclog.Println("Server profiling address: ", lis.Addr().String()) if err := http.Serve(lis, nil); err != nil { grpclog.Fatalf("Failed to serve: %v", err) } }() addr, stopper := benchmark.StartServer() grpclog.Println("Server Address: ", addr) <-time.After(time.Duration(*duration) * time.Second) stopper() } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/stats/counter.go ================================================ package stats import ( "sync" "time" ) var ( // TimeNow is used for testing. TimeNow = time.Now ) const ( hour = 0 tenminutes = 1 minute = 2 ) // Counter is a counter that keeps track of its recent values over a given // period of time, and with a given resolution. Use newCounter() to instantiate. 
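// Editor's note: illustrative, in-package sketch, not part of the vendored
// file. It shows how the Counter defined below is meant to be used; the name
// exampleCounterUsage is hypothetical.
func exampleCounterUsage() {
	c := newCounter()
	for i := 0; i < 100; i++ {
		c.Incr(1) // record 100 events
	}
	_ = c.Value()   // current value of the counter
	_ = c.Delta1m() // change over the last minute
	_ = c.Rate1m()  // per-second rate over the last minute
}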
type Counter struct { mu sync.RWMutex ts [3]*timeseries lastUpdate time.Time } // newCounter returns a new Counter. func newCounter() *Counter { now := TimeNow() c := &Counter{} c.ts[hour] = newTimeSeries(now, time.Hour, time.Minute) c.ts[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second) c.ts[minute] = newTimeSeries(now, time.Minute, time.Second) return c } func (c *Counter) advance() time.Time { now := TimeNow() for _, ts := range c.ts { ts.advanceTime(now) } return now } // Value returns the current value of the counter. func (c *Counter) Value() int64 { c.mu.RLock() defer c.mu.RUnlock() return c.ts[minute].headValue() } // LastUpdate returns the last update time of the counter. func (c *Counter) LastUpdate() time.Time { c.mu.RLock() defer c.mu.RUnlock() return c.lastUpdate } // Set updates the current value of the counter. func (c *Counter) Set(value int64) { c.mu.Lock() defer c.mu.Unlock() c.lastUpdate = c.advance() for _, ts := range c.ts { ts.set(value) } } // Incr increments the current value of the counter by 'delta'. func (c *Counter) Incr(delta int64) { c.mu.Lock() defer c.mu.Unlock() c.lastUpdate = c.advance() for _, ts := range c.ts { ts.incr(delta) } } // Delta1h returns the delta for the last hour. func (c *Counter) Delta1h() int64 { c.mu.RLock() defer c.mu.RUnlock() c.advance() return c.ts[hour].delta() } // Delta10m returns the delta for the last 10 minutes. func (c *Counter) Delta10m() int64 { c.mu.RLock() defer c.mu.RUnlock() c.advance() return c.ts[tenminutes].delta() } // Delta1m returns the delta for the last minute. func (c *Counter) Delta1m() int64 { c.mu.RLock() defer c.mu.RUnlock() c.advance() return c.ts[minute].delta() } // Rate1h returns the rate of change of the counter in the last hour. func (c *Counter) Rate1h() float64 { c.mu.RLock() defer c.mu.RUnlock() c.advance() return c.ts[hour].rate() } // Rate10m returns the rate of change of the counter in the last 10 minutes. func (c *Counter) Rate10m() float64 { c.mu.RLock() defer c.mu.RUnlock() c.advance() return c.ts[tenminutes].rate() } // Rate1m returns the rate of change of the counter in the last minute. func (c *Counter) Rate1m() float64 { c.mu.RLock() defer c.mu.RUnlock() c.advance() return c.ts[minute].rate() } // Reset resets the counter to an empty state. func (c *Counter) Reset() { c.mu.Lock() defer c.mu.Unlock() now := TimeNow() for _, ts := range c.ts { ts.reset(now) } } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/stats/histogram.go ================================================ package stats import ( "bytes" "fmt" "io" "strconv" "strings" "time" ) // HistogramValue is the value of Histogram objects. type HistogramValue struct { // Count is the total number of values added to the histogram. Count int64 // Sum is the sum of all the values added to the histogram. Sum int64 // Min is the minimum of all the values added to the histogram. Min int64 // Max is the maximum of all the values added to the histogram. Max int64 // Buckets contains all the buckets of the histogram. Buckets []HistogramBucket } // HistogramBucket is one histogram bucket. type HistogramBucket struct { // LowBound is the lower bound of the bucket. LowBound int64 // Count is the number of values in the bucket. Count int64 } // Print writes textual output of the histogram values. 
func (v HistogramValue) Print(w io.Writer) { avg := float64(v.Sum) / float64(v.Count) fmt.Fprintf(w, "Count: %d Min: %d Max: %d Avg: %.2f\n", v.Count, v.Min, v.Max, avg) fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60)) if v.Count <= 0 { return } maxBucketDigitLen := len(strconv.FormatInt(v.Buckets[len(v.Buckets)-1].LowBound, 10)) if maxBucketDigitLen < 3 { // For "inf". maxBucketDigitLen = 3 } maxCountDigitLen := len(strconv.FormatInt(v.Count, 10)) percentMulti := 100 / float64(v.Count) accCount := int64(0) for i, b := range v.Buckets { fmt.Fprintf(w, "[%*d, ", maxBucketDigitLen, b.LowBound) if i+1 < len(v.Buckets) { fmt.Fprintf(w, "%*d)", maxBucketDigitLen, v.Buckets[i+1].LowBound) } else { fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") } accCount += b.Count fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti) const barScale = 0.1 barLength := int(float64(b.Count)*percentMulti*barScale + 0.5) fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength)) } } // String returns the textual output of the histogram values as string. func (v HistogramValue) String() string { var b bytes.Buffer v.Print(&b) return b.String() } // A Histogram accumulates values in the form of a histogram. The type of the // values is int64, which is suitable for keeping track of things like RPC // latency in milliseconds. New histogram objects should be obtained via the // New() function. type Histogram struct { opts HistogramOptions buckets []bucketInternal count *Counter sum *Counter tracker *Tracker } // HistogramOptions contains the parameters that define the histogram's buckets. type HistogramOptions struct { // NumBuckets is the number of buckets. NumBuckets int // GrowthFactor is the growth factor of the buckets. A value of 0.1 // indicates that bucket N+1 will be 10% larger than bucket N. GrowthFactor float64 // SmallestBucketSize is the size of the first bucket. Bucket sizes are // rounded down to the nearest integer. SmallestBucketSize float64 // MinValue is the lower bound of the first bucket. MinValue int64 } // bucketInternal is the internal representation of a bucket, which includes a // rate counter. type bucketInternal struct { lowBound int64 count *Counter } // NewHistogram returns a pointer to a new Histogram object that was created // with the provided options. func NewHistogram(opts HistogramOptions) *Histogram { if opts.NumBuckets == 0 { opts.NumBuckets = 32 } if opts.SmallestBucketSize == 0.0 { opts.SmallestBucketSize = 1.0 } h := Histogram{ opts: opts, buckets: make([]bucketInternal, opts.NumBuckets), count: newCounter(), sum: newCounter(), tracker: newTracker(), } low := opts.MinValue delta := opts.SmallestBucketSize for i := 0; i < opts.NumBuckets; i++ { h.buckets[i].lowBound = low h.buckets[i].count = newCounter() low = low + int64(delta) delta = delta * (1.0 + opts.GrowthFactor) } return &h } // Opts returns a copy of the options used to create the Histogram. func (h *Histogram) Opts() HistogramOptions { return h.opts } // Add adds a value to the histogram. func (h *Histogram) Add(value int64) error { bucket, err := h.findBucket(value) if err != nil { return err } h.buckets[bucket].count.Incr(1) h.count.Incr(1) h.sum.Incr(value) h.tracker.Push(value) return nil } // LastUpdate returns the time at which the object was last updated. func (h *Histogram) LastUpdate() time.Time { return h.count.LastUpdate() } // Value returns the accumulated state of the histogram since it was created. 
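// Editor's note: illustrative, in-package sketch, not part of the vendored
// file. It shows how the exported Histogram API defined in this file fits
// together; the name exampleHistogramUsage is hypothetical.
func exampleHistogramUsage() {
	h := NewHistogram(HistogramOptions{
		NumBuckets:         8,
		GrowthFactor:       0.5, // each bucket is roughly 50% wider than the last
		SmallestBucketSize: 1.0,
		MinValue:           0,
	})
	for _, v := range []int64{1, 2, 3, 5, 8, 13, 21} {
		if err := h.Add(v); err != nil { // only values below MinValue fail
			panic(err)
		}
	}
	fmt.Println(h.Value().String()) // count, min, max, avg and the bucket bars
}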
func (h *Histogram) Value() HistogramValue { b := make([]HistogramBucket, len(h.buckets)) for i, v := range h.buckets { b[i] = HistogramBucket{ LowBound: v.lowBound, Count: v.count.Value(), } } v := HistogramValue{ Count: h.count.Value(), Sum: h.sum.Value(), Min: h.tracker.Min(), Max: h.tracker.Max(), Buckets: b, } return v } // Delta1h returns the change in the last hour. func (h *Histogram) Delta1h() HistogramValue { b := make([]HistogramBucket, len(h.buckets)) for i, v := range h.buckets { b[i] = HistogramBucket{ LowBound: v.lowBound, Count: v.count.Delta1h(), } } v := HistogramValue{ Count: h.count.Delta1h(), Sum: h.sum.Delta1h(), Min: h.tracker.Min1h(), Max: h.tracker.Max1h(), Buckets: b, } return v } // Delta10m returns the change in the last 10 minutes. func (h *Histogram) Delta10m() HistogramValue { b := make([]HistogramBucket, len(h.buckets)) for i, v := range h.buckets { b[i] = HistogramBucket{ LowBound: v.lowBound, Count: v.count.Delta10m(), } } v := HistogramValue{ Count: h.count.Delta10m(), Sum: h.sum.Delta10m(), Min: h.tracker.Min10m(), Max: h.tracker.Max10m(), Buckets: b, } return v } // Delta1m returns the change in the last minute. func (h *Histogram) Delta1m() HistogramValue { b := make([]HistogramBucket, len(h.buckets)) for i, v := range h.buckets { b[i] = HistogramBucket{ LowBound: v.lowBound, Count: v.count.Delta1m(), } } v := HistogramValue{ Count: h.count.Delta1m(), Sum: h.sum.Delta1m(), Min: h.tracker.Min1m(), Max: h.tracker.Max1m(), Buckets: b, } return v } // findBucket does a binary search to find in which bucket the value goes. func (h *Histogram) findBucket(value int64) (int, error) { lastBucket := len(h.buckets) - 1 min, max := 0, lastBucket for max >= min { b := (min + max) / 2 if value >= h.buckets[b].lowBound && (b == lastBucket || value < h.buckets[b+1].lowBound) { return b, nil } if value < h.buckets[b].lowBound { max = b - 1 continue } min = b + 1 } return 0, fmt.Errorf("no bucket for value: %d", value) } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/stats/stats.go ================================================ package stats import ( "bytes" "fmt" "io" "math" "time" ) // Stats is a simple helper for gathering additional statistics like histogram // during benchmarks. This is not thread safe. type Stats struct { numBuckets int unit time.Duration min, max int64 histogram *Histogram durations durationSlice dirty bool } type durationSlice []time.Duration // NewStats creates a new Stats instance. If numBuckets is not positive, // the default value (16) will be used. func NewStats(numBuckets int) *Stats { if numBuckets <= 0 { numBuckets = 16 } return &Stats{ // Use one more bucket for the last unbounded bucket. numBuckets: numBuckets + 1, durations: make(durationSlice, 0, 100000), } } // Add adds an elapsed time per operation to the stats. func (stats *Stats) Add(d time.Duration) { stats.durations = append(stats.durations, d) stats.dirty = true } // Clear resets the stats, removing all values. func (stats *Stats) Clear() { stats.durations = stats.durations[:0] stats.histogram = nil stats.dirty = false } // maybeUpdate updates internal stat data if there were any newly added // stats since this was last updated.
func (stats *Stats) maybeUpdate() { if !stats.dirty { return } stats.min = math.MaxInt64 stats.max = 0 for _, d := range stats.durations { if stats.min > int64(d) { stats.min = int64(d) } if stats.max < int64(d) { stats.max = int64(d) } } // Use the largest unit that can represent the minimum time duration. stats.unit = time.Nanosecond for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} { if stats.min <= int64(u) { break } stats.unit = u } // Adjust the min/max according to the new unit. stats.min /= int64(stats.unit) stats.max /= int64(stats.unit) numBuckets := stats.numBuckets if n := int(stats.max - stats.min + 1); n < numBuckets { numBuckets = n } stats.histogram = NewHistogram(HistogramOptions{ NumBuckets: numBuckets, // max(i.e., Nth lower bound) = min + (1 + growthFactor)^(numBuckets-2). GrowthFactor: math.Pow(float64(stats.max-stats.min), 1/float64(stats.numBuckets-2)) - 1, SmallestBucketSize: 1.0, MinValue: stats.min}) for _, d := range stats.durations { stats.histogram.Add(int64(d / stats.unit)) } stats.dirty = false } // Print writes textual output of the Stats. func (stats *Stats) Print(w io.Writer) { stats.maybeUpdate() if stats.histogram == nil { fmt.Fprint(w, "Histogram (empty)\n") } else { fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:]) stats.histogram.Value().Print(w) } } // String returns the textual output of the Stats as string. func (stats *Stats) String() string { var b bytes.Buffer stats.Print(&b) return b.String() } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/stats/timeseries.go ================================================ package stats import ( "math" "time" ) // timeseries holds the history of a changing value over a predefined period of // time. type timeseries struct { size int // The number of time slots. Equivalent to len(slots). resolution time.Duration // The time resolution of each slot. stepCount int64 // The number of intervals seen since creation. head int // The position of the current time in slots. time time.Time // The time at the beginning of the current time slot. slots []int64 // A circular buffer of time slots. } // newTimeSeries returns a newly allocated timeseries that covers the requested // period with the given resolution. func newTimeSeries(initialTime time.Time, period, resolution time.Duration) *timeseries { size := int(period.Nanoseconds()/resolution.Nanoseconds()) + 1 return &timeseries{ size: size, resolution: resolution, stepCount: 1, time: initialTime, slots: make([]int64, size), } } // advanceTimeWithFill moves the timeseries forward to time t and fills in any // slots that get skipped in the process with the given value. Values older than // the timeseries period are lost. func (ts *timeseries) advanceTimeWithFill(t time.Time, value int64) { advanceTo := t.Truncate(ts.resolution) if !advanceTo.After(ts.time) { // This is a shortcut for the most common case of a busy counter // where updates come in many times per ts.resolution. ts.time = advanceTo return } steps := int(advanceTo.Sub(ts.time).Nanoseconds() / ts.resolution.Nanoseconds()) ts.stepCount += int64(steps) if steps > ts.size { steps = ts.size } for steps > 0 { ts.head = (ts.head + 1) % ts.size ts.slots[ts.head] = value steps-- } ts.time = advanceTo } // advanceTime moves the timeseries forward to time t and fills in any slots // that get skipped in the process with the head value. Values older than the // timeseries period are lost.
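// Editor's note: illustrative, in-package sketch, not part of the vendored
// file. It demonstrates the circular-buffer semantics described above, using
// the package-level TimeNow clock from counter.go; the name exampleTimeseries
// is hypothetical.
func exampleTimeseries() {
	now := TimeNow()
	ts := newTimeSeries(now, time.Minute, time.Second) // 61 one-second slots
	ts.incr(5)                                // current slot now holds 5
	ts.advanceTime(now.Add(10 * time.Second)) // skipped slots copy the head value
	_ = ts.delta()                            // newest value minus oldest value
	_ = ts.rate()                             // delta per second over the window
}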
func (ts *timeseries) advanceTime(t time.Time) { ts.advanceTimeWithFill(t, ts.slots[ts.head]) } // set sets the current value of the timeseries. func (ts *timeseries) set(value int64) { ts.slots[ts.head] = value } // incr sets the current value of the timeseries. func (ts *timeseries) incr(delta int64) { ts.slots[ts.head] += delta } // headValue returns the latest value from the timeseries. func (ts *timeseries) headValue() int64 { return ts.slots[ts.head] } // headTime returns the time of the latest value from the timeseries. func (ts *timeseries) headTime() time.Time { return ts.time } // tailValue returns the oldest value from the timeseries. func (ts *timeseries) tailValue() int64 { if ts.stepCount < int64(ts.size) { return 0 } return ts.slots[(ts.head+1)%ts.size] } // tailTime returns the time of the oldest value from the timeseries. func (ts *timeseries) tailTime() time.Time { size := int64(ts.size) if ts.stepCount < size { size = ts.stepCount } return ts.time.Add(-time.Duration(size-1) * ts.resolution) } // delta returns the difference between the newest and oldest values from the // timeseries. func (ts *timeseries) delta() int64 { return ts.headValue() - ts.tailValue() } // rate returns the rate of change between the oldest and newest values from // the timeseries in units per second. func (ts *timeseries) rate() float64 { deltaTime := ts.headTime().Sub(ts.tailTime()).Seconds() if deltaTime == 0 { return 0 } return float64(ts.delta()) / deltaTime } // min returns the smallest value from the timeseries. func (ts *timeseries) min() int64 { to := ts.size if ts.stepCount < int64(ts.size) { to = ts.head + 1 } tail := (ts.head + 1) % ts.size min := int64(math.MaxInt64) for b := 0; b < to; b++ { if b != tail && ts.slots[b] < min { min = ts.slots[b] } } return min } // max returns the largest value from the timeseries. func (ts *timeseries) max() int64 { to := ts.size if ts.stepCount < int64(ts.size) { to = ts.head + 1 } tail := (ts.head + 1) % ts.size max := int64(math.MinInt64) for b := 0; b < to; b++ { if b != tail && ts.slots[b] > max { max = ts.slots[b] } } return max } // reset resets the timeseries to an empty state. func (ts *timeseries) reset(t time.Time) { ts.head = 0 ts.time = t ts.stepCount = 1 ts.slots = make([]int64, ts.size) } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/stats/tracker.go ================================================ package stats import ( "math" "sync" "time" ) // Tracker is a min/max value tracker that keeps track of its min/max values // over a given period of time, and with a given resolution. The initial min // and max values are math.MaxInt64 and math.MinInt64 respectively. type Tracker struct { mu sync.RWMutex min, max int64 // All time min/max. minTS, maxTS [3]*timeseries lastUpdate time.Time } // newTracker returns a new Tracker. 
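// Editor's note: illustrative, in-package sketch, not part of the vendored
// file, showing the Tracker API defined below; the name exampleTrackerUsage is
// hypothetical.
func exampleTrackerUsage() {
	t := newTracker()
	for _, v := range []int64{42, 7, 99} {
		t.Push(v) // updates the all-time and windowed minima/maxima
	}
	_ = t.Min()   // 7: all-time minimum
	_ = t.Max()   // 99: all-time maximum
	_ = t.Min1m() // minimum observed over the last minute
}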
func newTracker() *Tracker { now := TimeNow() t := &Tracker{} t.minTS[hour] = newTimeSeries(now, time.Hour, time.Minute) t.minTS[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second) t.minTS[minute] = newTimeSeries(now, time.Minute, time.Second) t.maxTS[hour] = newTimeSeries(now, time.Hour, time.Minute) t.maxTS[tenminutes] = newTimeSeries(now, 10*time.Minute, 10*time.Second) t.maxTS[minute] = newTimeSeries(now, time.Minute, time.Second) t.init() return t } func (t *Tracker) init() { t.min = math.MaxInt64 t.max = math.MinInt64 for _, ts := range t.minTS { ts.set(math.MaxInt64) } for _, ts := range t.maxTS { ts.set(math.MinInt64) } } func (t *Tracker) advance() time.Time { now := TimeNow() for _, ts := range t.minTS { ts.advanceTimeWithFill(now, math.MaxInt64) } for _, ts := range t.maxTS { ts.advanceTimeWithFill(now, math.MinInt64) } return now } // LastUpdate returns the last update time of the range. func (t *Tracker) LastUpdate() time.Time { t.mu.RLock() defer t.mu.RUnlock() return t.lastUpdate } // Push adds a new value if it is a new minimum or maximum. func (t *Tracker) Push(value int64) { t.mu.Lock() defer t.mu.Unlock() t.lastUpdate = t.advance() if t.min > value { t.min = value } if t.max < value { t.max = value } for _, ts := range t.minTS { if ts.headValue() > value { ts.set(value) } } for _, ts := range t.maxTS { if ts.headValue() < value { ts.set(value) } } } // Min returns the minimum value of the tracker func (t *Tracker) Min() int64 { t.mu.RLock() defer t.mu.RUnlock() return t.min } // Max returns the maximum value of the tracker. func (t *Tracker) Max() int64 { t.mu.RLock() defer t.mu.RUnlock() return t.max } // Min1h returns the minimum value for the last hour. func (t *Tracker) Min1h() int64 { t.mu.Lock() defer t.mu.Unlock() t.advance() return t.minTS[hour].min() } // Max1h returns the maximum value for the last hour. func (t *Tracker) Max1h() int64 { t.mu.Lock() defer t.mu.Unlock() t.advance() return t.maxTS[hour].max() } // Min10m returns the minimum value for the last 10 minutes. func (t *Tracker) Min10m() int64 { t.mu.Lock() defer t.mu.Unlock() t.advance() return t.minTS[tenminutes].min() } // Max10m returns the maximum value for the last 10 minutes. func (t *Tracker) Max10m() int64 { t.mu.Lock() defer t.mu.Unlock() t.advance() return t.maxTS[tenminutes].max() } // Min1m returns the minimum value for the last 1 minute. func (t *Tracker) Min1m() int64 { t.mu.Lock() defer t.mu.Unlock() t.advance() return t.minTS[minute].min() } // Max1m returns the maximum value for the last 1 minute. func (t *Tracker) Max1m() int64 { t.mu.Lock() defer t.mu.Unlock() t.advance() return t.maxTS[minute].max() } // Reset resets the range to an empty state. func (t *Tracker) Reset() { t.mu.Lock() defer t.mu.Unlock() now := TimeNow() for _, ts := range t.minTS { ts.reset(now) } for _, ts := range t.maxTS { ts.reset(now) } t.init() } ================================================ FILE: vendor/google.golang.org/grpc/benchmark/stats/util.go ================================================ package stats import ( "bufio" "bytes" "fmt" "os" "runtime" "sort" "strings" "sync" "testing" ) var ( curB *testing.B curBenchName string curStats map[string]*Stats orgStdout *os.File nextOutPos int injectCond *sync.Cond injectDone chan struct{} ) // AddStats adds a new unnamed Stats instance to the current benchmark. You need // to run benchmarks by calling RunTestMain() to inject the stats to the // benchmark results. If numBuckets is not positive, the default value (16) will // be used. 
Please note that this calls b.ResetTimer() since it may be blocked // until the previous benchmark stats is printed out. So AddStats() should // typically be called at the very beginning of each benchmark function. func AddStats(b *testing.B, numBuckets int) *Stats { return AddStatsWithName(b, "", numBuckets) } // AddStatsWithName adds a new named Stats instance to the current benchmark. // With this, you can add multiple stats in a single benchmark. You need // to run benchmarks by calling RunTestMain() to inject the stats to the // benchmark results. If numBuckets is not positive, the default value (16) will // be used. Please note that this calls b.ResetTimer() since it may be blocked // until the previous benchmark stats is printed out. So AddStatsWithName() // should typically be called at the very beginning of each benchmark function. func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats { var benchName string for i := 1; ; i++ { pc, _, _, ok := runtime.Caller(i) if !ok { panic("benchmark function not found") } p := strings.Split(runtime.FuncForPC(pc).Name(), ".") benchName = p[len(p)-1] if strings.HasPrefix(benchName, "Benchmark") { break } } procs := runtime.GOMAXPROCS(-1) if procs != 1 { benchName = fmt.Sprintf("%s-%d", benchName, procs) } stats := NewStats(numBuckets) if injectCond != nil { // We need to wait until the previous benchmark stats is printed out. injectCond.L.Lock() for curB != nil && curBenchName != benchName { injectCond.Wait() } curB = b curBenchName = benchName curStats[name] = stats injectCond.L.Unlock() } b.ResetTimer() return stats } // RunTestMain runs the tests with enabling injection of benchmark stats. It // returns an exit code to pass to os.Exit. func RunTestMain(m *testing.M) int { startStatsInjector() defer stopStatsInjector() return m.Run() } // startStatsInjector starts stats injection to benchmark results. func startStatsInjector() { orgStdout = os.Stdout r, w, _ := os.Pipe() os.Stdout = w nextOutPos = 0 resetCurBenchStats() injectCond = sync.NewCond(&sync.Mutex{}) injectDone = make(chan struct{}) go func() { defer close(injectDone) scanner := bufio.NewScanner(r) scanner.Split(splitLines) for scanner.Scan() { injectStatsIfFinished(scanner.Text()) } if err := scanner.Err(); err != nil { panic(err) } }() } // stopStatsInjector stops stats injection and restores os.Stdout. func stopStatsInjector() { os.Stdout.Close() <-injectDone injectCond = nil os.Stdout = orgStdout } // splitLines is a split function for a bufio.Scanner that returns each line // of text, teeing texts to the original stdout even before each line ends. func splitLines(data []byte, eof bool) (advance int, token []byte, err error) { if eof && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\n'); i >= 0 { orgStdout.Write(data[nextOutPos : i+1]) nextOutPos = 0 return i + 1, data[0:i], nil } orgStdout.Write(data[nextOutPos:]) nextOutPos = len(data) if eof { // This is a final, non-terminated line. Return it. return len(data), data, nil } return 0, nil, nil } // injectStatsIfFinished prints out the stats if the current benchmark finishes. func injectStatsIfFinished(line string) { injectCond.L.Lock() defer injectCond.L.Unlock() // We assume that the benchmark results start with the benchmark name. if curB == nil || !strings.HasPrefix(line, curBenchName) { return } if !curB.Failed() { // Output all stats in alphabetical order. 
names := make([]string, 0, len(curStats)) for name := range curStats { names = append(names, name) } sort.Strings(names) for _, name := range names { stats := curStats[name] // The output of stats starts with a header like "Histogram (unit: ms)" // followed by statistical properties and the buckets. Add the stats name // if it is a named stats and indent them as Go testing outputs. lines := strings.Split(stats.String(), "\n") if n := len(lines); n > 0 { if name != "" { name = ": " + name } fmt.Fprintf(orgStdout, "--- %s%s\n", lines[0], name) for _, line := range lines[1 : n-1] { fmt.Fprintf(orgStdout, "\t%s\n", line) } } } } resetCurBenchStats() injectCond.Signal() } // resetCurBenchStats resets the current benchmark stats. func resetCurBenchStats() { curB = nil curBenchName = "" curStats = make(map[string]*Stats) } ================================================ FILE: vendor/google.golang.org/grpc/call.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package grpc import ( "io" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) // recvResponse receives and parses an RPC response. // On error, it returns the error and indicates whether the call should be retried. // // TODO(zhaoq): Check whether the received message sequence is valid. func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // Try to acquire header metadata from the server if there is any. var err error c.headerMD, err = stream.Header() if err != nil { return err } p := &parser{s: stream} for { if err = recv(p, codec, reply); err != nil { if err == io.EOF { break } return err } } c.trailerMD = stream.Trailer() return nil } // sendRequest writes out various information of an RPC such as Context and Message. 
func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { stream, err := t.NewStream(ctx, callHdr) if err != nil { return nil, err } defer func() { if err != nil { if _, ok := err.(transport.ConnectionError); !ok { t.CloseStream(stream, err) } } }() // TODO(zhaoq): Support compression. outBuf, err := encode(codec, args, compressionNone) if err != nil { return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) } err = t.Write(stream, outBuf, opts) if err != nil { return nil, err } // Sent successfully. return stream, nil } // callInfo contains all related configuration and information about an RPC. type callInfo struct { failFast bool headerMD metadata.MD trailerMD metadata.MD } // Invoke is called by the generated code. It sends the RPC request on the // wire and returns after response is received. func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { var c callInfo for _, o := range opts { if err := o.before(&c); err != nil { return toRPCErr(err) } } defer func() { for _, o := range opts { o.after(&c) } }() callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, } topts := &transport.Options{ Last: true, Delay: false, } var ( ts int // track the transport sequence number lastErr error // record the error that happened ) for { var ( err error t transport.ClientTransport stream *transport.Stream ) // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. if lastErr != nil && c.failFast { return toRPCErr(lastErr) } t, ts, err = cc.wait(ctx, ts) if err != nil { if lastErr != nil { // This was a retry; return the error from the last attempt. return toRPCErr(lastErr) } return toRPCErr(err) } stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) if err != nil { if _, ok := err.(transport.ConnectionError); ok { lastErr = err continue } if lastErr != nil { return toRPCErr(lastErr) } return toRPCErr(err) } // Receive the response lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply) if _, ok := lastErr.(transport.ConnectionError); ok { continue } t.CloseStream(stream, lastErr) if lastErr != nil { return toRPCErr(lastErr) } return Errorf(stream.StatusCode(), stream.StatusDesc()) } } ================================================ FILE: vendor/google.golang.org/grpc/clientconn.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package grpc import ( "errors" "net" "strings" "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/transport" ) var ( // ErrUnspecTarget indicates that the target address is unspecified. ErrUnspecTarget = errors.New("grpc: target is unspecified") // ErrClientConnClosing indicates that the operation is illegal because // the session is closing. ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the connection could not be // established or re-established within the specified timeout. ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") ) // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { codec Codec copts transport.ConnectOptions } // DialOption configures how we set up the connection. type DialOption func(*dialOptions) // WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. func WithCodec(c Codec) DialOption { return func(o *dialOptions) { o.codec = c } } // WithTransportCredentials returns a DialOption which configures a // connection level security credentials (e.g., TLS/SSL). func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { return func(o *dialOptions) { o.copts.AuthOptions = append(o.copts.AuthOptions, creds) } } // WithPerRPCCredentials returns a DialOption which sets // credentials which will place auth state on each outbound RPC. func WithPerRPCCredentials(creds credentials.Credentials) DialOption { return func(o *dialOptions) { o.copts.AuthOptions = append(o.copts.AuthOptions, creds) } } // WithTimeout returns a DialOption that configures a timeout for dialing a client connection. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { o.copts.Timeout = d } } // WithDialer returns a DialOption that specifies a function to use for dialing network addresses. func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption { return func(o *dialOptions) { o.copts.Dialer = f } } // Dial creates a client connection the given target. // TODO(zhaoq): Have an option to make Dial return immediately without waiting // for connection to complete. func Dial(target string, opts ...DialOption) (*ClientConn, error) { if target == "" { return nil, ErrUnspecTarget } cc := &ClientConn{ target: target, } for _, opt := range opts { opt(&cc.dopts) } colonPos := strings.LastIndex(target, ":") if colonPos == -1 { colonPos = len(target) } cc.authority = target[:colonPos] if cc.dopts.codec == nil { // Set the default codec. cc.dopts.codec = protoCodec{} } if err := cc.resetTransport(false); err != nil { return nil, err } cc.shutdownChan = make(chan struct{}) // Start to monitor the error status of transport. go cc.transportMonitor() return cc, nil } // ClientConn represents a client connection to an RPC service. 
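The DialOption constructors above compose into a single Dial call. A minimal sketch of how a caller might combine them follows; the target address, timeout value, and custom dialer are illustrative assumptions rather than anything defined in this file.

```go
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Illustrative target and timeout; WithDialer swaps in a plain TCP dialer.
	conn, err := grpc.Dial("localhost:10000",
		grpc.WithTimeout(5*time.Second),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("tcp", addr, timeout)
		}),
	)
	if err != nil {
		log.Fatalf("Dial failed: %v", err)
	}
	defer conn.Close()
	// conn would then be handed to a generated client constructor,
	// e.g. pb.NewRouteGuideClient(conn) in the route_guide example below.
}
```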
type ClientConn struct { target string authority string dopts dialOptions shutdownChan chan struct{} mu sync.Mutex // ready is closed and becomes nil when a new transport is up or failed // due to timeout. ready chan struct{} // Indicates the ClientConn is under destruction. closing bool // Every time a new transport is created, this is incremented by 1. Used // to avoid trying to recreate a transport while the new one is already // under construction. transportSeq int transport transport.ClientTransport } func (cc *ClientConn) resetTransport(closeTransport bool) error { var retries int start := time.Now() for { cc.mu.Lock() t := cc.transport ts := cc.transportSeq // Avoid wait() picking up a dying transport unnecessarily. cc.transportSeq = 0 if cc.closing { cc.mu.Unlock() return ErrClientConnClosing } cc.mu.Unlock() if closeTransport { t.Close() } // Adjust timeout for the current try. copts := cc.dopts.copts if copts.Timeout < 0 { cc.Close() return ErrClientConnTimeout } if copts.Timeout > 0 { copts.Timeout -= time.Since(start) if copts.Timeout <= 0 { cc.Close() return ErrClientConnTimeout } } newTransport, err := transport.NewClientTransport(cc.target, &copts) if err != nil { sleepTime := backoff(retries) // Fail early before falling into sleep. if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { cc.Close() return ErrClientConnTimeout } closeTransport = false time.Sleep(sleepTime) retries++ grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) continue } cc.mu.Lock() if cc.closing { // cc.Close() has been invoked. cc.mu.Unlock() newTransport.Close() return ErrClientConnClosing } cc.transport = newTransport cc.transportSeq = ts + 1 if cc.ready != nil { close(cc.ready) cc.ready = nil } cc.mu.Unlock() return nil } } // Run in a goroutine to track the error in transport and create the // new transport if an error happens. It returns when the channel is closing. func (cc *ClientConn) transportMonitor() { for { select { // shutdownChan is needed to detect the channel teardown when // the ClientConn is idle (i.e., no RPC in flight). case <-cc.shutdownChan: return case <-cc.transport.Error(): if err := cc.resetTransport(true); err != nil { // The channel is closing. grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err) return } continue } } } // When wait returns, either the new transport is up or ClientConn is // closing. Used to avoid working on a dying transport. It updates and // returns the transport and its version when there is no error. func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) { for { cc.mu.Lock() switch { case cc.closing: cc.mu.Unlock() return nil, 0, ErrClientConnClosing case ts < cc.transportSeq: // Worked on a dying transport. Try the new one immediately. defer cc.mu.Unlock() return cc.transport, cc.transportSeq, nil default: ready := cc.ready if ready == nil { ready = make(chan struct{}) cc.ready = ready } cc.mu.Unlock() select { case <-ctx.Done(): return nil, 0, transport.ContextErr(ctx.Err()) // Wait until the new transport is ready or failed. case <-ready: } } } } // Close starts to tear down the ClientConn. Returns ErrClientConnClosing if // it has been closed (mostly due to dial time-out). // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many ClientConn's in a // tight loop. 
func (cc *ClientConn) Close() error { cc.mu.Lock() defer cc.mu.Unlock() if cc.closing { return ErrClientConnClosing } cc.closing = true if cc.ready != nil { close(cc.ready) cc.ready = nil } if cc.transport != nil { cc.transport.Close() } if cc.shutdownChan != nil { close(cc.shutdownChan) } return nil } ================================================ FILE: vendor/google.golang.org/grpc/codegen.sh ================================================ #!/bin/bash # This script serves as an example to demonstrate how to generate the gRPC-Go # interface and the related messages from .proto file. # # It assumes the installation of i) Google proto buffer compiler at # https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen # plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have # not, please install them first. # # We recommend running this script at $GOPATH or $GOPATH/src. # # If this is not what you need, feel free to make your own scripts. Again, this # script is for demonstration purpose. # proto=$1 protoc --go_out=plugins=grpc:. $proto ================================================ FILE: vendor/google.golang.org/grpc/codes/code_string.go ================================================ // generated by stringer -type=Code; DO NOT EDIT package codes import "fmt" const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} func (i Code) String() string { if i+1 >= Code(len(_Code_index)) { return fmt.Sprintf("Code(%d)", i) } return _Code_name[_Code_index[i]:_Code_index[i+1]] } ================================================ FILE: vendor/google.golang.org/grpc/codes/codes.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // Package codes defines the canonical error codes used by gRPC. 
It is // consistent across various languages. package codes // A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 //go:generate stringer -type=Code const ( // OK is returned on success. OK Code = 0 // Canceled indicates the operation was cancelled (typically by the caller). Canceled Code = 1 // Unknown error. An example of where this error may be returned is // if a Status value received from another address space belongs to // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. Unknown Code = 2 // InvalidArgument indicates client specified an invalid argument. // Note that this differs from FailedPrecondition. It indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). InvalidArgument Code = 3 // DeadlineExceeded means operation expired before completion. // For operations that change the state of the system, this error may be // returned even if the operation has completed successfully. For // example, a successful response from a server could have been delayed // long enough for the deadline to expire. DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to // execute the specified operation. It must not be used for rejections // caused by exhausting some resource (use ResourceExhausted // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). PermissionDenied Code = 7 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. Unauthenticated Code = 16 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the // system is not in a state required for the operation's execution. // For example, directory to be deleted may be non-empty, an rmdir // operation is applied to a non-directory, etc. // // A litmus test that may help a service implementor in deciding // between FailedPrecondition, Aborted, and Unavailable: // (a) Use Unavailable if the client can retry just the failing call. // (b) Use Aborted if the client should retry at a higher-level // (e.g., restarting a read-modify-write sequence). // (c) Use FailedPrecondition if the client should not retry until // the system state has been explicitly fixed. E.g., if an "rmdir" // fails because the directory is non-empty, FailedPrecondition // should be returned since the client should not retry unless // they have first fixed up the directory by deleting files from it. // (d) Use FailedPrecondition if the client performs conditional // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a // concurrency issue like sequencer check failures, transaction aborts, // etc. // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. 
Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. // E.g., seeking or reading past end of file. // // Unlike InvalidArgument, this error indicates a problem that may // be fixed if the system state changes. For example, a 32-bit file // system will generate InvalidArgument if asked to read at an // offset that is not in the range [0,2^32-1], but it will generate // OutOfRange if asked to read from an offset past the current // file size. // // There is a fair bit of overlap between FailedPrecondition and // OutOfRange. We recommend using OutOfRange (the more specific // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. Internal Code = 13 // Unavailable indicates the service is currently unavailable. // This is a most likely a transient condition and may be corrected // by retrying with a backoff. // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. DataLoss Code = 15 ) ================================================ FILE: vendor/google.golang.org/grpc/credentials/credentials.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // Package credentials implements various credentials supported by gRPC library, // which encapsulate all the state needed by a client to authenticate with a // server and make various assertions, e.g., about the client's identity, role, // or whether it is authorized to make a particular call. 
package credentials import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net" "strings" "time" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" ) var ( // alpnProtoStr are the specified application level protocols for gRPC. alpnProtoStr = []string{"h2", "h2-14", "h2-15", "h2-16"} ) // Credentials defines the common interface all supported credentials must // implement. type Credentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other // context. When supported by the underlying implementation, ctx can // be used for timeout and cancellation. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. GetRequestMetadata(ctx context.Context) (map[string]string, error) } // ProtocolInfo provides information regarding the gRPC wire protocol version, // security protocol, security protocol version in use, etc. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string // SecurityVersion is the security protocol version. SecurityVersion string } // TransportAuthenticator defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportAuthenticator interface { // ClientHandshake does the authentication handshake specified by the corresponding // authentication protocol on rawConn for clients. ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, error) // ServerHandshake does the authentication handshake for servers. ServerHandshake(rawConn net.Conn) (net.Conn, error) // Info provides the ProtocolInfo of this TransportAuthenticator. Info() ProtocolInfo Credentials } // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration config tls.Config } func (c *tlsCreds) Info() ProtocolInfo { return ProtocolInfo{ SecurityProtocol: "tls", SecurityVersion: "1.2", } } // GetRequestMetadata returns nil, nil since TLS credentials does not have // metadata. 
func (c *tlsCreds) GetRequestMetadata(ctx context.Context) (map[string]string, error) { return nil, nil } type timeoutError struct{} func (timeoutError) Error() string { return "credentials: Dial timed out" } func (timeoutError) Timeout() bool { return true } func (timeoutError) Temporary() bool { return true } func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, err error) { // borrow some code from tls.DialWithDialer var errChannel chan error if timeout != 0 { errChannel = make(chan error, 2) time.AfterFunc(timeout, func() { errChannel <- timeoutError{} }) } if c.config.ServerName == "" { colonPos := strings.LastIndex(addr, ":") if colonPos == -1 { colonPos = len(addr) } c.config.ServerName = addr[:colonPos] } conn := tls.Client(rawConn, &c.config) if timeout == 0 { err = conn.Handshake() } else { go func() { errChannel <- conn.Handshake() }() err = <-errChannel } if err != nil { rawConn.Close() return nil, err } return conn, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, error) { conn := tls.Server(rawConn, &c.config) if err := conn.Handshake(); err != nil { rawConn.Close() return nil, err } return conn, nil } // NewTLS uses c to construct a TransportAuthenticator based on TLS. func NewTLS(c *tls.Config) TransportAuthenticator { tc := &tlsCreds{*c} tc.config.NextProtos = alpnProtoStr return tc } // NewClientTLSFromCert constructs a TLS from the input certificate for client. func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator { return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) } // NewClientTLSFromFile constructs a TLS from the input certificate file for client. func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) { b, err := ioutil.ReadFile(certFile) if err != nil { return nil, err } cp := x509.NewCertPool() if !cp.AppendCertsFromPEM(b) { return nil, fmt.Errorf("credentials: failed to append certificates") } return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil } // NewServerTLSFromCert constructs a TLS from the input certificate for server. func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } // NewServerTLSFromFile constructs a TLS from the input certificate file and key // file for server. func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return nil, err } return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } // TokenSource supplies credentials from an oauth2.TokenSource. type TokenSource struct { oauth2.TokenSource } // GetRequestMetadata gets the request metadata as a map from a TokenSource. func (ts TokenSource) GetRequestMetadata(ctx context.Context) (map[string]string, error) { token, err := ts.Token() if err != nil { return nil, err } return map[string]string{ "authorization": token.TokenType + " " + token.AccessToken, }, nil } // NewComputeEngine constructs the credentials that fetches access tokens from // Google Compute Engine (GCE)'s metadata server. It is only valid to use this // if your program is running on a GCE instance. // TODO(dsymonds): Deprecate and remove this. func NewComputeEngine() Credentials { return TokenSource{google.ComputeTokenSource("")} } // serviceAccount represents credentials via JWT signing key. 
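Because the Credentials interface above requires only GetRequestMetadata, application-specific per-RPC metadata can be attached with a very small type. The sketch below assumes a hypothetical `x-api-key` header; the staticKey type, header name, address, and key value are illustrative and not part of this package.

```go
package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// staticKey is a hypothetical credentials.Credentials implementation that
// attaches a fixed header to every outbound RPC.
type staticKey struct{ key string }

func (s staticKey) GetRequestMetadata(ctx context.Context) (map[string]string, error) {
	return map[string]string{"x-api-key": s.key}, nil
}

var _ credentials.Credentials = staticKey{} // compile-time interface check

func main() {
	// Wired in through WithPerRPCCredentials from clientconn.go; the address
	// and key value are illustrative.
	conn, err := grpc.Dial("localhost:10000",
		grpc.WithPerRPCCredentials(staticKey{key: "secret"}))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```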
type serviceAccount struct { config *jwt.Config } func (s serviceAccount) GetRequestMetadata(ctx context.Context) (map[string]string, error) { token, err := s.config.TokenSource(ctx).Token() if err != nil { return nil, err } return map[string]string{ "authorization": token.TokenType + " " + token.AccessToken, }, nil } // NewServiceAccountFromKey constructs the credentials using the JSON key slice // from a Google Developers service account. func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (Credentials, error) { config, err := google.JWTConfigFromJSON(jsonKey, scope...) if err != nil { return nil, err } return serviceAccount{config: config}, nil } // NewServiceAccountFromFile constructs the credentials using the JSON key file // of a Google Developers service account. func NewServiceAccountFromFile(keyFile string, scope ...string) (Credentials, error) { jsonKey, err := ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } return NewServiceAccountFromKey(jsonKey, scope...) } // NewApplicationDefault returns "Application Default Credentials". For more // detail, see https://developers.google.com/accounts/docs/application-default-credentials. func NewApplicationDefault(ctx context.Context, scope ...string) (Credentials, error) { t, err := google.DefaultTokenSource(ctx, scope...) if err != nil { return nil, err } return TokenSource{t}, nil } ================================================ FILE: vendor/google.golang.org/grpc/doc.go ================================================ /* Package grpc implements an RPC system called gRPC. See https://github.com/grpc/grpc for more information about gRPC. */ package grpc ================================================ FILE: vendor/google.golang.org/grpc/examples/route_guide/README.md ================================================ # Description The route guide server and client demonstrate how to use grpc go libraries to perform unary, client streaming, server streaming and full duplex RPCs. Please refer to [Getting Started Guide for Go] (https://github.com/grpc/grpc-common/blob/master/go/gotutorial.md) for more information. See the definition of the route guide service in proto/route_guide.proto. # Run the sample code To compile and run the server, assuming you are in the root of the route_guide folder, i.e., .../examples/route_guide/, simply: ```sh $ go run server/server.go ``` Likewise, to run the client: ```sh $ go run client/client.go ``` # Optional command line flags The server and client both take optional command line flags. For example, the client and server run without TLS by default. To enable TLS: ```sh $ go run server/server.go -tls=true ``` and ```sh $ go run client/client.go -tls=true ``` ================================================ FILE: vendor/google.golang.org/grpc/examples/route_guide/client/client.go ================================================ /* * * Copyright 2015, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. 
nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // // It interacts with the route guide service whose definition can be found in proto/route_guide.proto. package main import ( "flag" "io" "math/rand" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" pb "google.golang.org/grpc/examples/route_guide/proto" "google.golang.org/grpc/grpclog" ) var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") caFile = flag.String("ca_file", "testdata/ca.pem", "The file containning the CA root cert file") serverAddr = flag.String("server_addr", "127.0.0.1:10000", "The server address in the format of host:port") serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake") ) // printFeature gets the feature for the given point. func printFeature(client pb.RouteGuideClient, point *pb.Point) { grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) feature, err := client.GetFeature(context.Background(), point) if err != nil { grpclog.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) } grpclog.Println(feature) } // printFeatures lists all the features within the given bounding Rectangle. func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { grpclog.Printf("Looking for features within %v", rect) stream, err := client.ListFeatures(context.Background(), rect) if err != nil { grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } for { feature, err := stream.Recv() if err == io.EOF { break } if err != nil { grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } grpclog.Println(feature) } } // runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. 
func runRecordRoute(client pb.RouteGuideClient) { // Create a random number of random points r := rand.New(rand.NewSource(time.Now().UnixNano())) pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points var points []*pb.Point for i := 0; i < pointCount; i++ { points = append(points, randomPoint(r)) } grpclog.Printf("Traversing %d points.", len(points)) stream, err := client.RecordRoute(context.Background()) if err != nil { grpclog.Fatalf("%v.RecordRoute(_) = _, %v", client, err) } for _, point := range points { if err := stream.Send(point); err != nil { grpclog.Fatalf("%v.Send(%v) = %v", stream, point, err) } } reply, err := stream.CloseAndRecv() if err != nil { grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } grpclog.Printf("Route summary: %v", reply) } // runRouteChat receives a sequence of route notes, while sending notes for various locations. func runRouteChat(client pb.RouteGuideClient) { notes := []*pb.RouteNote{ {&pb.Point{0, 1}, "First message"}, {&pb.Point{0, 2}, "Second message"}, {&pb.Point{0, 3}, "Third message"}, {&pb.Point{0, 1}, "Fourth message"}, {&pb.Point{0, 2}, "Fifth message"}, {&pb.Point{0, 3}, "Sixth message"}, } stream, err := client.RouteChat(context.Background()) if err != nil { grpclog.Fatalf("%v.RouteChat(_) = _, %v", client, err) } waitc := make(chan struct{}) go func() { for { in, err := stream.Recv() if err == io.EOF { // read done. close(waitc) return } if err != nil { grpclog.Fatalf("Failed to receive a note : %v", err) } grpclog.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { grpclog.Fatalf("Failed to send a note: %v", err) } } stream.CloseSend() <-waitc } func randomPoint(r *rand.Rand) *pb.Point { lat := (r.Int31n(180) - 90) * 1e7 long := (r.Int31n(360) - 180) * 1e7 return &pb.Point{lat, long} } func main() { flag.Parse() var opts []grpc.DialOption if *tls { var sn string if *serverHostOverride != "" { sn = *serverHostOverride } var creds credentials.TransportAuthenticator if *caFile != "" { var err error creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) } conn, err := grpc.Dial(*serverAddr, opts...) if err != nil { grpclog.Fatalf("fail to dial: %v", err) } defer conn.Close() client := pb.NewRouteGuideClient(conn) // Looking for a valid feature printFeature(client, &pb.Point{409146138, -746188906}) // Feature missing. printFeature(client, &pb.Point{0, 0}) // Looking for features between 40, -75 and 42, -73. printFeatures(client, &pb.Rectangle{&pb.Point{400000000, -750000000}, &pb.Point{420000000, -730000000}}) // RecordRoute runRecordRoute(client) // RouteChat runRouteChat(client) } ================================================ FILE: vendor/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go ================================================ // Code generated by protoc-gen-go. // source: route_guide.proto // DO NOT EDIT! /* Package proto is a generated protocol buffer package. 
It is generated from these files: route_guide.proto It has these top-level messages: Point Rectangle Feature RouteNote RouteSummary */ package proto import proto1 "github.com/golang/protobuf/proto" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto1.Marshal // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). // Latitudes should be in the range +/- 90 degrees and longitude should be in // the range +/- 180 degrees (inclusive). type Point struct { Latitude int32 `protobuf:"varint,1,opt,name=latitude" json:"latitude,omitempty"` Longitude int32 `protobuf:"varint,2,opt,name=longitude" json:"longitude,omitempty"` } func (m *Point) Reset() { *m = Point{} } func (m *Point) String() string { return proto1.CompactTextString(m) } func (*Point) ProtoMessage() {} // A latitude-longitude rectangle, represented as two diagonally opposite // points "lo" and "hi". type Rectangle struct { // One corner of the rectangle. Lo *Point `protobuf:"bytes,1,opt,name=lo" json:"lo,omitempty"` // The other corner of the rectangle. Hi *Point `protobuf:"bytes,2,opt,name=hi" json:"hi,omitempty"` } func (m *Rectangle) Reset() { *m = Rectangle{} } func (m *Rectangle) String() string { return proto1.CompactTextString(m) } func (*Rectangle) ProtoMessage() {} func (m *Rectangle) GetLo() *Point { if m != nil { return m.Lo } return nil } func (m *Rectangle) GetHi() *Point { if m != nil { return m.Hi } return nil } // A feature names something at a given point. // // If a feature could not be named, the name is empty. type Feature struct { // The name of the feature. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The point where the feature is detected. Location *Point `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` } func (m *Feature) Reset() { *m = Feature{} } func (m *Feature) String() string { return proto1.CompactTextString(m) } func (*Feature) ProtoMessage() {} func (m *Feature) GetLocation() *Point { if m != nil { return m.Location } return nil } // A RouteNote is a message sent while at a given point. type RouteNote struct { // The location from which the message is sent. Location *Point `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` // The message to be sent. Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` } func (m *RouteNote) Reset() { *m = RouteNote{} } func (m *RouteNote) String() string { return proto1.CompactTextString(m) } func (*RouteNote) ProtoMessage() {} func (m *RouteNote) GetLocation() *Point { if m != nil { return m.Location } return nil } // A RouteSummary is received in response to a RecordRoute rpc. // // It contains the number of individual points received, the number of // detected features, and the total distance covered as the cumulative sum of // the distance between each point. type RouteSummary struct { // The number of points received. PointCount int32 `protobuf:"varint,1,opt,name=point_count" json:"point_count,omitempty"` // The number of known features passed while traversing the route. FeatureCount int32 `protobuf:"varint,2,opt,name=feature_count" json:"feature_count,omitempty"` // The distance covered in metres. 
Distance int32 `protobuf:"varint,3,opt,name=distance" json:"distance,omitempty"` // The duration of the traversal in seconds. ElapsedTime int32 `protobuf:"varint,4,opt,name=elapsed_time" json:"elapsed_time,omitempty"` } func (m *RouteSummary) Reset() { *m = RouteSummary{} } func (m *RouteSummary) String() string { return proto1.CompactTextString(m) } func (*RouteSummary) ProtoMessage() {} func init() { } // Client API for RouteGuide service type RouteGuideClient interface { // A simple RPC. // // Obtains the feature at a given position. // // If no feature is found for the given point, a feature with an empty name // should be returned. GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) // A server-to-client streaming RPC. // // Obtains the Features available within the given Rectangle. Results are // streamed rather than returned at once (e.g. in a response message with a // repeated field), as the rectangle may cover a large area and contain a // huge number of features. ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) // A client-to-server streaming RPC. // // Accepts a stream of Points on a route being traversed, returning a // RouteSummary when traversal is completed. RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) // A Bidirectional streaming RPC. // // Accepts a stream of RouteNotes sent while a route is being traversed, // while receiving other RouteNotes (e.g. from other users). RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) } type routeGuideClient struct { cc *grpc.ClientConn } func NewRouteGuideClient(cc *grpc.ClientConn) RouteGuideClient { return &routeGuideClient{cc} } func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { out := new(Feature) err := grpc.Invoke(ctx, "/proto.RouteGuide/GetFeature", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[0], c.cc, "/proto.RouteGuide/ListFeatures", opts...) if err != nil { return nil, err } x := &routeGuideListFeaturesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RouteGuide_ListFeaturesClient interface { Recv() (*Feature, error) grpc.ClientStream } type routeGuideListFeaturesClient struct { grpc.ClientStream } func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { m := new(Feature) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[1], c.cc, "/proto.RouteGuide/RecordRoute", opts...) 
if err != nil { return nil, err } x := &routeGuideRecordRouteClient{stream} return x, nil } type RouteGuide_RecordRouteClient interface { Send(*Point) error CloseAndRecv() (*RouteSummary, error) grpc.ClientStream } type routeGuideRecordRouteClient struct { grpc.ClientStream } func (x *routeGuideRecordRouteClient) Send(m *Point) error { return x.ClientStream.SendMsg(m) } func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(RouteSummary) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[2], c.cc, "/proto.RouteGuide/RouteChat", opts...) if err != nil { return nil, err } x := &routeGuideRouteChatClient{stream} return x, nil } type RouteGuide_RouteChatClient interface { Send(*RouteNote) error Recv() (*RouteNote, error) grpc.ClientStream } type routeGuideRouteChatClient struct { grpc.ClientStream } func (x *routeGuideRouteChatClient) Send(m *RouteNote) error { return x.ClientStream.SendMsg(m) } func (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) { m := new(RouteNote) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for RouteGuide service type RouteGuideServer interface { // A simple RPC. // // Obtains the feature at a given position. // // If no feature is found for the given point, a feature with an empty name // should be returned. GetFeature(context.Context, *Point) (*Feature, error) // A server-to-client streaming RPC. // // Obtains the Features available within the given Rectangle. Results are // streamed rather than returned at once (e.g. in a response message with a // repeated field), as the rectangle may cover a large area and contain a // huge number of features. ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error // A client-to-server streaming RPC. // // Accepts a stream of Points on a route being traversed, returning a // RouteSummary when traversal is completed. RecordRoute(RouteGuide_RecordRouteServer) error // A Bidirectional streaming RPC. // // Accepts a stream of RouteNotes sent while a route is being traversed, // while receiving other RouteNotes (e.g. from other users). 
RouteChat(RouteGuide_RouteChatServer) error } func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { s.RegisterService(&_RouteGuide_serviceDesc, srv) } func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(Point) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(RouteGuideServer).GetFeature(ctx, in) if err != nil { return nil, err } return out, nil } func _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(Rectangle) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream}) } type RouteGuide_ListFeaturesServer interface { Send(*Feature) error grpc.ServerStream } type routeGuideListFeaturesServer struct { grpc.ServerStream } func (x *routeGuideListFeaturesServer) Send(m *Feature) error { return x.ServerStream.SendMsg(m) } func _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream}) } type RouteGuide_RecordRouteServer interface { SendAndClose(*RouteSummary) error Recv() (*Point, error) grpc.ServerStream } type routeGuideRecordRouteServer struct { grpc.ServerStream } func (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error { return x.ServerStream.SendMsg(m) } func (x *routeGuideRecordRouteServer) Recv() (*Point, error) { m := new(Point) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream}) } type RouteGuide_RouteChatServer interface { Send(*RouteNote) error Recv() (*RouteNote, error) grpc.ServerStream } type routeGuideRouteChatServer struct { grpc.ServerStream } func (x *routeGuideRouteChatServer) Send(m *RouteNote) error { return x.ServerStream.SendMsg(m) } func (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) { m := new(RouteNote) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _RouteGuide_serviceDesc = grpc.ServiceDesc{ ServiceName: "proto.RouteGuide", HandlerType: (*RouteGuideServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetFeature", Handler: _RouteGuide_GetFeature_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "ListFeatures", Handler: _RouteGuide_ListFeatures_Handler, ServerStreams: true, }, { StreamName: "RecordRoute", Handler: _RouteGuide_RecordRoute_Handler, ClientStreams: true, }, { StreamName: "RouteChat", Handler: _RouteGuide_RouteChat_Handler, ServerStreams: true, ClientStreams: true, }, }, } ================================================ FILE: vendor/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto ================================================ // Copyright 2015, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. 
// * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package proto; // Interface exported by the server. service RouteGuide { // A simple RPC. // // Obtains the feature at a given position. // // If no feature is found for the given point, a feature with an empty name // should be returned. rpc GetFeature(Point) returns (Feature) {} // A server-to-client streaming RPC. // // Obtains the Features available within the given Rectangle. Results are // streamed rather than returned at once (e.g. in a response message with a // repeated field), as the rectangle may cover a large area and contain a // huge number of features. rpc ListFeatures(Rectangle) returns (stream Feature) {} // A client-to-server streaming RPC. // // Accepts a stream of Points on a route being traversed, returning a // RouteSummary when traversal is completed. rpc RecordRoute(stream Point) returns (RouteSummary) {} // A Bidirectional streaming RPC. // // Accepts a stream of RouteNotes sent while a route is being traversed, // while receiving other RouteNotes (e.g. from other users). rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} } // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). // Latitudes should be in the range +/- 90 degrees and longitude should be in // the range +/- 180 degrees (inclusive). message Point { int32 latitude = 1; int32 longitude = 2; } // A latitude-longitude rectangle, represented as two diagonally opposite // points "lo" and "hi". message Rectangle { // One corner of the rectangle. Point lo = 1; // The other corner of the rectangle. Point hi = 2; } // A feature names something at a given point. // // If a feature could not be named, the name is empty. message Feature { // The name of the feature. string name = 1; // The point where the feature is detected. Point location = 2; } // A RouteNote is a message sent while at a given point. message RouteNote { // The location from which the message is sent. Point location = 1; // The message to be sent. string message = 2; } // A RouteSummary is received in response to a RecordRoute rpc. // // It contains the number of individual points received, the number of // detected features, and the total distance covered as the cumulative sum of // the distance between each point. message RouteSummary { // The number of points received. int32 point_count = 1; // The number of known features passed while traversing the route. int32 feature_count = 2; // The distance covered in metres. 
int32 distance = 3; // The duration of the traversal in seconds. int32 elapsed_time = 4; } ================================================ FILE: vendor/google.golang.org/grpc/examples/route_guide/server/server.go ================================================ /* * * Copyright 2015, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // // It implements the route guide service whose definition can be found in proto/route_guide.proto. package main import ( "encoding/json" "flag" "fmt" "io" "io/ioutil" "math" "net" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" proto "github.com/golang/protobuf/proto" pb "google.golang.org/grpc/examples/route_guide/proto" ) var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") certFile = flag.String("cert_file", "testdata/server1.pem", "The TLS cert file") keyFile = flag.String("key_file", "testdata/server1.key", "The TLS key file") jsonDBFile = flag.String("json_db_file", "testdata/route_guide_db.json", "A json file containing a list of features") port = flag.Int("port", 10000, "The server port") ) type routeGuideServer struct { savedFeatures []*pb.Feature routeNotes map[string][]*pb.RouteNote } // GetFeature returns the feature at the given point. func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { for _, feature := range s.savedFeatures { if proto.Equal(feature.Location, point) { return feature, nil } } // No feature was found, return an unnamed feature return &pb.Feature{"", point}, nil } // ListFeatures lists all features comtained within the given bounding Rectangle. 
func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { for _, feature := range s.savedFeatures { if inRange(feature.Location, rect) { if err := stream.Send(feature); err != nil { return err } } } return nil } // RecordRoute records a route composited of a sequence of points. // // It gets a stream of points, and responds with statistics about the "trip": // number of points, number of known features visited, total distance traveled, and // total time spent. func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { var pointCount, featureCount, distance int32 var lastPoint *pb.Point startTime := time.Now() for { point, err := stream.Recv() if err == io.EOF { endTime := time.Now() return stream.SendAndClose(&pb.RouteSummary{ PointCount: pointCount, FeatureCount: featureCount, Distance: distance, ElapsedTime: int32(endTime.Sub(startTime).Seconds()), }) } if err != nil { return err } pointCount++ for _, feature := range s.savedFeatures { if proto.Equal(feature.Location, point) { featureCount++ } } if lastPoint != nil { distance += calcDistance(lastPoint, point) } lastPoint = point } } // RouteChat receives a stream of message/location pairs, and responds with a stream of all // previous messages at each of those locations. func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } key := serialize(in.Location) if _, present := s.routeNotes[key]; !present { s.routeNotes[key] = []*pb.RouteNote{in} } else { s.routeNotes[key] = append(s.routeNotes[key], in) } for _, note := range s.routeNotes[key] { if err := stream.Send(note); err != nil { return err } } } } // loadFeatures loads features from a JSON file. func (s *routeGuideServer) loadFeatures(filePath string) { file, err := ioutil.ReadFile(filePath) if err != nil { grpclog.Fatalf("Failed to load default features: %v", err) } if err := json.Unmarshal(file, &s.savedFeatures); err != nil { grpclog.Fatalf("Failed to load default features: %v", err) } } func toRadians(num float64) float64 { return num * math.Pi / float64(180) } // calcDistance calculates the distance between two points using the "haversine" formula. // This code was taken from http://www.movable-type.co.uk/scripts/latlong.html. 
func calcDistance(p1 *pb.Point, p2 *pb.Point) int32 { const CordFactor float64 = 1e7 const R float64 = float64(6371000) // metres lat1 := float64(p1.Latitude) / CordFactor lat2 := float64(p2.Latitude) / CordFactor lng1 := float64(p1.Longitude) / CordFactor lng2 := float64(p2.Longitude) / CordFactor φ1 := toRadians(lat1) φ2 := toRadians(lat2) Δφ := toRadians(lat2 - lat1) Δλ := toRadians(lng2 - lng1) a := math.Sin(Δφ/2)*math.Sin(Δφ/2) + math.Cos(φ1)*math.Cos(φ2)* math.Sin(Δλ/2)*math.Sin(Δλ/2) c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) distance := R * c return int32(distance) } func inRange(point *pb.Point, rect *pb.Rectangle) bool { left := math.Min(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) right := math.Max(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) top := math.Max(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) bottom := math.Min(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) if float64(point.Longitude) >= left && float64(point.Longitude) <= right && float64(point.Latitude) >= bottom && float64(point.Latitude) <= top { return true } return false } func serialize(point *pb.Point) string { return fmt.Sprintf("%d %d", point.Latitude, point.Longitude) } func newServer() *routeGuideServer { s := new(routeGuideServer) s.loadFeatures(*jsonDBFile) s.routeNotes = make(map[string][]*pb.RouteNote) return s } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } var opts []grpc.ServerOption if *tls { creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { grpclog.Fatalf("Failed to generate credentials %v", err) } opts = []grpc.ServerOption{grpc.Creds(creds)} } grpcServer := grpc.NewServer(opts...) pb.RegisterRouteGuideServer(grpcServer, newServer()) grpcServer.Serve(lis) } ================================================ FILE: vendor/google.golang.org/grpc/grpc-auth-support.md ================================================ # Authentication As outlined here, gRPC supports a number of different mechanisms for asserting identity between a client and a server. We'll present some code samples here demonstrating how to provide TLS encryption and identity assertions, as well as how to pass OAuth2 tokens to services that support them. # Enabling TLS on a gRPC client ```Go conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) ``` # Enabling TLS on a gRPC server ```Go creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) if err != nil { log.Fatalf("Failed to generate credentials %v", err) } lis, err := net.Listen("tcp", ":0") server := grpc.NewServer(grpc.Creds(creds)) ... server.Serve(lis) ``` # Authenticating with Google ## Google Compute Engine (GCE) ```Go conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(credentials.NewComputeEngine())) ``` ## JWT ```Go jwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) if err != nil { log.Fatalf("Failed to create JWT credentials: %v", err) } conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(jwtCreds)) ``` ================================================ FILE: vendor/google.golang.org/grpc/grpclog/logger.go ================================================ /* * * Copyright 2015, Google Inc. * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* Package log defines logging for grpc. */ package grpclog import ( "log" "os" "github.com/golang/glog" ) var ( // GLogger is a Logger that uses glog. This is the default logger. GLogger Logger = &glogger{} // StdLogger is a Logger that uses golang's standard logger. StdLogger Logger = log.New(os.Stderr, "", log.LstdFlags) logger = GLogger ) // Logger mimics golang's standard Logger as an interface. type Logger interface { Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) Fatalln(args ...interface{}) Print(args ...interface{}) Printf(format string, args ...interface{}) Println(args ...interface{}) } // SetLogger sets the logger that is used in grpc. func SetLogger(l Logger) { logger = l } // Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. func Fatal(args ...interface{}) { logger.Fatal(args...) } // Fatal is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. func Fatalf(format string, args ...interface{}) { logger.Fatalf(format, args...) } // Fatal is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. func Fatalln(args ...interface{}) { logger.Fatalln(args...) } // Print prints to the logger. Arguments are handled in the manner of fmt.Print. func Print(args ...interface{}) { logger.Print(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. func Printf(format string, args ...interface{}) { logger.Printf(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. func Println(args ...interface{}) { logger.Println(args...) } type glogger struct{} func (g *glogger) Fatal(args ...interface{}) { glog.Fatal(args...) } func (g *glogger) Fatalf(format string, args ...interface{}) { glog.Fatalf(format, args...) } func (g *glogger) Fatalln(args ...interface{}) { glog.Fatalln(args...) } func (g *glogger) Print(args ...interface{}) { glog.Info(args...) } func (g *glogger) Printf(format string, args ...interface{}) { glog.Infof(format, args...) 
} func (g *glogger) Println(args ...interface{}) { glog.Infoln(args...) } ================================================ FILE: vendor/google.golang.org/grpc/interop/client/client.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package main import ( "flag" "io" "io/ioutil" "net" "strconv" "strings" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/metadata" ) var ( useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") caFile = flag.String("tls_ca_file", "testdata/ca.pem", "The file containning the CA root cert file") serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") serverHost = flag.String("server_host", "127.0.0.1", "The server host name") serverPort = flag.Int("server_port", 10000, "The server port number") tlsServerName = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") testCase = flag.String("test_case", "large_unary", `Configure different test cases. 
Valid options are: empty_unary : empty (zero bytes) request and response; large_unary : single request and (large) response; client_streaming : request streaming with single response; server_streaming : single request with response streaming; ping_pong : full-duplex streaming; compute_engine_creds: large_unary with compute engine auth; service_account_creds: large_unary with service account auth; cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; cancel_after_first_response: cancellation after receiving 1st message from the server.`) ) var ( reqSizes = []int{27182, 8, 1828, 45904} respSizes = []int{31415, 9, 2653, 58979} largeReqSize = 271828 largeRespSize = 314159 ) func newPayload(t testpb.PayloadType, size int) *testpb.Payload { if size < 0 { grpclog.Fatalf("Requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") default: grpclog.Fatalf("Unsupported payload type: %d", t) } return &testpb.Payload{ Type: t.Enum(), Body: body, } } func doEmptyUnaryCall(tc testpb.TestServiceClient) { reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}) if err != nil { grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err) } if !proto.Equal(&testpb.Empty{}, reply) { grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{}) } grpclog.Println("EmptyUnaryCall done") } func doLargeUnaryCall(tc testpb.TestServiceClient) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), Payload: pl, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } t := reply.GetPayload().GetType() s := len(reply.GetPayload().GetBody()) if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) } grpclog.Println("LargeUnaryCall done") } func doClientStreaming(tc testpb.TestServiceClient) { stream, err := tc.StreamingInputCall(context.Background()) if err != nil { grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } var sum int for _, s := range reqSizes { pl := newPayload(testpb.PayloadType_COMPRESSABLE, s) req := &testpb.StreamingInputCallRequest{ Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) } sum += s grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum) } reply, err := stream.CloseAndRecv() if err != nil { grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } if reply.GetAggregatedPayloadSize() != int32(sum) { grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) } grpclog.Println("ClientStreaming done") } func doServerStreaming(tc testpb.TestServiceClient) { respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { respParam[i] = &testpb.ResponseParameters{ Size: proto.Int32(int32(s)), } } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseParameters: respParam, } stream, err := tc.StreamingOutputCall(context.Background(), req) if err != nil { 
grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err) } var rpcStatus error var respCnt int var index int for { reply, err := stream.Recv() if err != nil { rpcStatus = err break } t := reply.GetPayload().GetType() if t != testpb.PayloadType_COMPRESSABLE { grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != int(respSizes[index]) { grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ respCnt++ } if rpcStatus != io.EOF { grpclog.Fatalf("Failed to finish the server streaming rpc: %v", err) } if respCnt != len(respSizes) { grpclog.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) } grpclog.Println("ServerStreaming done") } func doPingPong(tc testpb.TestServiceClient) { stream, err := tc.FullDuplexCall(context.Background()) if err != nil { grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } var index int for index < len(reqSizes) { respParam := []*testpb.ResponseParameters{ { Size: proto.Int32(int32(respSizes[index])), }, } pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index]) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseParameters: respParam, Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) } reply, err := stream.Recv() if err != nil { grpclog.Fatalf("%v.Recv() = %v", stream, err) } t := reply.GetPayload().GetType() if t != testpb.PayloadType_COMPRESSABLE { grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != int(respSizes[index]) { grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ } if err := stream.CloseSend(); err != nil { grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { grpclog.Fatalf("%v failed to complele the ping pong test: %v", stream, err) } grpclog.Println("Pingpong done") } func doComputeEngineCreds(tc testpb.TestServiceClient) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), Payload: pl, FillUsername: proto.Bool(true), FillOauthScope: proto.Bool(true), } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } user := reply.GetUsername() scope := reply.GetOauthScope() if user != *defaultServiceAccount { grpclog.Fatalf("Got user name %q, want %q.", user, *defaultServiceAccount) } if !strings.Contains(*oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) } grpclog.Println("ComputeEngineCreds done") } func getServiceAccountJSONKey() []byte { jsonKey, err := ioutil.ReadFile(*serviceAccountKeyFile) if err != nil { grpclog.Fatalf("Failed to read the service account key file: %v", err) } return jsonKey } func doServiceAccountCreds(tc testpb.TestServiceClient) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), Payload: pl, FillUsername: proto.Bool(true), FillOauthScope: proto.Bool(true), } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: 
", err) } jsonKey := getServiceAccountJSONKey() user := reply.GetUsername() scope := reply.GetOauthScope() if !strings.Contains(string(jsonKey), user) { grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } if !strings.Contains(*oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) } grpclog.Println("ServiceAccountCreds done") } var ( testMetadata = metadata.MD{ "key1": "value1", "key2": "value2", } ) func doCancelAfterBegin(tc testpb.TestServiceClient) { ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata)) stream, err := tc.StreamingInputCall(ctx) if err != nil { grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } cancel() _, err = stream.CloseAndRecv() if grpc.Code(err) != codes.Canceled { grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled) } grpclog.Println("CancelAfterBegin done") } func doCancelAfterFirstResponse(tc testpb.TestServiceClient) { ctx, cancel := context.WithCancel(context.Background()) stream, err := tc.FullDuplexCall(ctx) if err != nil { grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } respParam := []*testpb.ResponseParameters{ { Size: proto.Int32(31415), }, } pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseParameters: respParam, Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) } if _, err := stream.Recv(); err != nil { grpclog.Fatalf("%v.Recv() = %v", stream, err) } cancel() if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled { grpclog.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled) } grpclog.Println("CancelAfterFirstResponse done") } func main() { flag.Parse() serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) var opts []grpc.DialOption if *useTLS { var sn string if *tlsServerName != "" { sn = *tlsServerName } var creds credentials.TransportAuthenticator if *caFile != "" { var err error creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) if *testCase == "compute_engine_creds" { opts = append(opts, grpc.WithPerRPCCredentials(credentials.NewComputeEngine())) } else if *testCase == "service_account_creds" { jwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) if err != nil { grpclog.Fatalf("Failed to create JWT credentials: %v", err) } opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) } } conn, err := grpc.Dial(serverAddr, opts...) if err != nil { grpclog.Fatalf("Fail to dial: %v", err) } defer conn.Close() tc := testpb.NewTestServiceClient(conn) switch *testCase { case "empty_unary": doEmptyUnaryCall(tc) case "large_unary": doLargeUnaryCall(tc) case "client_streaming": doClientStreaming(tc) case "server_streaming": doServerStreaming(tc) case "ping_pong": doPingPong(tc) case "compute_engine_creds": if !*useTLS { grpclog.Fatalf("TLS is not enabled. TLS is required to execute compute_engine_creds test case.") } doComputeEngineCreds(tc) case "service_account_creds": if !*useTLS { grpclog.Fatalf("TLS is not enabled. 
TLS is required to execute service_account_creds test case.") } doServiceAccountCreds(tc) case "cancel_after_begin": doCancelAfterBegin(tc) case "cancel_after_first_response": doCancelAfterFirstResponse(tc) default: grpclog.Fatal("Unsupported test case: ", *testCase) } } ================================================ FILE: vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go ================================================ // Code generated by protoc-gen-go. // source: src/google.golang.org/grpc/test/grpc_testing/test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: src/google.golang.org/grpc/test/grpc_testing/test.proto It has these top-level messages: Empty Payload SimpleRequest SimpleResponse StreamingInputCallRequest StreamingInputCallResponse ResponseParameters StreamingOutputCallRequest StreamingOutputCallResponse */ package grpc_testing import proto "github.com/golang/protobuf/proto" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = math.Inf // The type of payload that should be returned. type PayloadType int32 const ( // Compressable text format. PayloadType_COMPRESSABLE PayloadType = 0 // Uncompressable binary format. PayloadType_UNCOMPRESSABLE PayloadType = 1 // Randomly chosen from all other formats defined in this enum. PayloadType_RANDOM PayloadType = 2 ) var PayloadType_name = map[int32]string{ 0: "COMPRESSABLE", 1: "UNCOMPRESSABLE", 2: "RANDOM", } var PayloadType_value = map[string]int32{ "COMPRESSABLE": 0, "UNCOMPRESSABLE": 1, "RANDOM": 2, } func (x PayloadType) Enum() *PayloadType { p := new(PayloadType) *p = x return p } func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } func (x *PayloadType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") if err != nil { return err } *x = PayloadType(value) return nil } type Empty struct { XXX_unrecognized []byte `json:"-"` } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} // A block of data, to simply increase gRPC message size. type Payload struct { // The type of data in body. Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} func (m *Payload) GetType() PayloadType { if m != nil && m.Type != nil { return *m.Type } return PayloadType_COMPRESSABLE } func (m *Payload) GetBody() []byte { if m != nil { return m.Body } return nil } // Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. 
// If response_type is COMPRESSABLE, this denotes the size before compression. ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` // Whether SimpleResponse should include username. FillUsername *bool `protobuf:"varint,4,opt,name=fill_username" json:"fill_username,omitempty"` // Whether SimpleResponse should include OAuth scope. FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope" json:"fill_oauth_scope,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} func (m *SimpleRequest) GetResponseType() PayloadType { if m != nil && m.ResponseType != nil { return *m.ResponseType } return PayloadType_COMPRESSABLE } func (m *SimpleRequest) GetResponseSize() int32 { if m != nil && m.ResponseSize != nil { return *m.ResponseSize } return 0 } func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleRequest) GetFillUsername() bool { if m != nil && m.FillUsername != nil { return *m.FillUsername } return false } func (m *SimpleRequest) GetFillOauthScope() bool { if m != nil && m.FillOauthScope != nil { return *m.FillOauthScope } return false } // Unary response, as configured by the request. type SimpleResponse struct { // Payload to increase message size. Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` // The user the request came from, for verifying authentication was // successful when the client expected it. Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` // OAuth scope. OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope" json:"oauth_scope,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } func (*SimpleResponse) ProtoMessage() {} func (m *SimpleResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleResponse) GetUsername() string { if m != nil && m.Username != nil { return *m.Username } return "" } func (m *SimpleResponse) GetOauthScope() string { if m != nil && m.OauthScope != nil { return *m.OauthScope } return "" } // Client-streaming request. type StreamingInputCallRequest struct { // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallRequest) ProtoMessage() {} func (m *StreamingInputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Client-streaming response. type StreamingInputCallResponse struct { // Aggregated size of payloads received from the client. 
AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size" json:"aggregated_payload_size,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallResponse) ProtoMessage() {} func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { if m != nil && m.AggregatedPayloadSize != nil { return *m.AggregatedPayloadSize } return 0 } // Configuration for a particular response. type ResponseParameters struct { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` // Desired interval between consecutive responses in the response stream in // microseconds. IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us" json:"interval_us,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } func (*ResponseParameters) ProtoMessage() {} func (m *ResponseParameters) GetSize() int32 { if m != nil && m.Size != nil { return *m.Size } return 0 } func (m *ResponseParameters) GetIntervalUs() int32 { if m != nil && m.IntervalUs != nil { return *m.IntervalUs } return 0 } // Server-streaming request. type StreamingOutputCallRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Configuration for each expected response message. ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters" json:"response_parameters,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallRequest) ProtoMessage() {} func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { if m != nil && m.ResponseType != nil { return *m.ResponseType } return PayloadType_COMPRESSABLE } func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { if m != nil { return m.ResponseParameters } return nil } func (m *StreamingOutputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Server-streaming response, as configured by the request and parameters. type StreamingOutputCallResponse struct { // Payload to increase response size. 
Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallResponse) ProtoMessage() {} func (m *StreamingOutputCallResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) } // Client API for TestService service type TestServiceClient interface { // One empty request followed by one empty response. EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) } type testServiceClient struct { cc *grpc.ClientConn } func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { return &testServiceClient{cc} } func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) 
if err != nil { return nil, err } x := &testServiceStreamingOutputCallClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type TestService_StreamingOutputCallClient interface { Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceStreamingOutputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) if err != nil { return nil, err } x := &testServiceStreamingInputCallClient{stream} return x, nil } type TestService_StreamingInputCallClient interface { Send(*StreamingInputCallRequest) error CloseAndRecv() (*StreamingInputCallResponse, error) grpc.ClientStream } type testServiceStreamingInputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamingInputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) if err != nil { return nil, err } x := &testServiceFullDuplexCallClient{stream} return x, nil } type TestService_FullDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceFullDuplexCallClient struct { grpc.ClientStream } func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) 
if err != nil { return nil, err } x := &testServiceHalfDuplexCallClient{stream} return x, nil } type TestService_HalfDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceHalfDuplexCallClient struct { grpc.ClientStream } func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for TestService service type TestServiceServer interface { // One empty request followed by one empty response. EmptyCall(context.Context, *Empty) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(TestService_StreamingInputCallServer) error // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(TestService_FullDuplexCallServer) error // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. 
HalfDuplexCall(TestService_HalfDuplexCallServer) error } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(Empty) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(TestServiceServer).EmptyCall(ctx, in) if err != nil { return nil, err } return out, nil } func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(SimpleRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(TestServiceServer).UnaryCall(ctx, in) if err != nil { return nil, err } return out, nil } func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(StreamingOutputCallRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) } type TestService_StreamingOutputCallServer interface { Send(*StreamingOutputCallResponse) error grpc.ServerStream } type testServiceStreamingOutputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) } type TestService_StreamingInputCallServer interface { SendAndClose(*StreamingInputCallResponse) error Recv() (*StreamingInputCallRequest, error) grpc.ServerStream } type testServiceStreamingInputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { m := new(StreamingInputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) } type TestService_FullDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceFullDuplexCallServer struct { grpc.ServerStream } func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m := new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) } type TestService_HalfDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceHalfDuplexCallServer struct { grpc.ServerStream } func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m 
:= new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "EmptyCall", Handler: _TestService_EmptyCall_Handler, }, { MethodName: "UnaryCall", Handler: _TestService_UnaryCall_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingOutputCall", Handler: _TestService_StreamingOutputCall_Handler, ServerStreams: true, }, { StreamName: "StreamingInputCall", Handler: _TestService_StreamingInputCall_Handler, ClientStreams: true, }, { StreamName: "FullDuplexCall", Handler: _TestService_FullDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "HalfDuplexCall", Handler: _TestService_HalfDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, }, } ================================================ FILE: vendor/google.golang.org/grpc/interop/grpc_testing/test.proto ================================================ // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. syntax = "proto2"; package grpc.testing; message Empty {} // The type of payload that should be returned. enum PayloadType { // Compressable text format. COMPRESSABLE = 0; // Uncompressable binary format. UNCOMPRESSABLE = 1; // Randomly chosen from all other formats defined in this enum. RANDOM = 2; } // A block of data, to simply increase gRPC message size. message Payload { // The type of data in body. optional PayloadType type = 1; // Primary contents of payload. optional bytes body = 2; } // Unary request. message SimpleRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. optional PayloadType response_type = 1; // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. optional int32 response_size = 2; // Optional input payload sent along with the request. optional Payload payload = 3; // Whether SimpleResponse should include username. optional bool fill_username = 4; // Whether SimpleResponse should include OAuth scope. optional bool fill_oauth_scope = 5; } // Unary response, as configured by the request. message SimpleResponse { // Payload to increase message size. optional Payload payload = 1; // The user the request came from, for verifying authentication was // successful when the client expected it. optional string username = 2; // OAuth scope. optional string oauth_scope = 3; } // Client-streaming request. message StreamingInputCallRequest { // Optional input payload sent along with the request. optional Payload payload = 1; // Not expecting any payload from the response. } // Client-streaming response. message StreamingInputCallResponse { // Aggregated size of payloads received from the client. optional int32 aggregated_payload_size = 1; } // Configuration for a particular response. message ResponseParameters { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. optional int32 size = 1; // Desired interval between consecutive responses in the response stream in // microseconds. optional int32 interval_us = 2; } // Server-streaming request. message StreamingOutputCallRequest { // Desired payload type in the response from the server. 
// If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. optional PayloadType response_type = 1; // Configuration for each expected response message. repeated ResponseParameters response_parameters = 2; // Optional input payload sent along with the request. optional Payload payload = 3; } // Server-streaming response, as configured by the request and parameters. message StreamingOutputCallResponse { // Payload to increase response size. optional Payload payload = 1; } // A simple service to test the various types of RPCs and experiment with // performance with various types of payload. service TestService { // One empty request followed by one empty response. rpc EmptyCall(Empty) returns (Empty); // One request followed by one response. // The server returns the client payload as-is. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. rpc StreamingOutputCall(StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. rpc StreamingInputCall(stream StreamingInputCallRequest) returns (StreamingInputCallResponse); // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. rpc FullDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. rpc HalfDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); } ================================================ FILE: vendor/google.golang.org/grpc/interop/server/server.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package main import ( "flag" "fmt" "io" "net" "strconv" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") certFile = flag.String("tls_cert_file", "testdata/server1.pem", "The TLS cert file") keyFile = flag.String("tls_key_file", "testdata/server1.key", "The TLS key file") port = flag.Int("port", 10000, "The server port") ) type testServer struct { } func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return new(testpb.Empty), nil } func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { if size < 0 { return nil, fmt.Errorf("requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: return nil, fmt.Errorf("payloadType UNCOMPRESSABLE is not supported") default: return nil, fmt.Errorf("unsupported payload type: %d", t) } return &testpb.Payload{ Type: t.Enum(), Body: body, }, nil } func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { pl, err := newPayload(in.GetResponseType(), in.GetResponseSize()) if err != nil { return nil, err } return &testpb.SimpleResponse{ Payload: pl, }, nil } func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { cs := args.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } pl, err := newPayload(args.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: pl, }); err != nil { return err } } return nil } func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { var sum int for { in, err := stream.Recv() if err == io.EOF { return stream.SendAndClose(&testpb.StreamingInputCallResponse{ AggregatedPayloadSize: proto.Int32(int32(sum)), }) } if err != nil { return err } p := in.GetPayload().GetBody() sum += len(p) } } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { for { in, err := stream.Recv() if err == io.EOF { // read done. 
return nil } if err != nil { return err } cs := in.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } pl, err := newPayload(in.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: pl, }); err != nil { return err } } } } func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { var msgBuf []*testpb.StreamingOutputCallRequest for { in, err := stream.Recv() if err == io.EOF { // read done. break } if err != nil { return err } msgBuf = append(msgBuf, in) } for _, m := range msgBuf { cs := m.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } pl, err := newPayload(m.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: pl, }); err != nil { return err } } } return nil } func main() { flag.Parse() p := strconv.Itoa(*port) lis, err := net.Listen("tcp", ":"+p) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } var opts []grpc.ServerOption if *useTLS { creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { grpclog.Fatalf("Failed to generate credentials %v", err) } opts = []grpc.ServerOption{grpc.Creds(creds)} } server := grpc.NewServer(opts...) testpb.RegisterTestServiceServer(server, &testServer{}) server.Serve(lis) } ================================================ FILE: vendor/google.golang.org/grpc/metadata/metadata.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // Package metadata define the structure of the metadata supported by gRPC library. package metadata import ( "encoding/base64" "fmt" "strings" "golang.org/x/net/context" ) const ( binHdrSuffix = "-bin" ) // grpc-http2 requires ASCII header key and value (more detail can be found in // "Requests" subsection in go/grpc-http2). 
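//
// A small sketch of the round trip implemented by the helpers in this file
// (the exact base64 output is illustrative): an all-ASCII value passes through
// unchanged, while a value containing non-ASCII bytes is base64-encoded and
// its key gains the "-bin" suffix:
//
//	k, v := encodeKeyValue("token", "abc")      // -> "token", "abc"
//	k, v = encodeKeyValue("trace", "\xde\xad")  // -> "trace-bin", base64 of the raw bytes
//	key, val, err := DecodeKeyValue(k, v)       // -> "trace", original bytes, nil
//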
func isASCII(s string) bool { for _, c := range s { if c > 127 { return false } } return true } // encodeKeyValue encodes key and value qualified for transmission via gRPC. // Transmitting binary headers violates HTTP/2 spec. // TODO(zhaoq): Maybe check if k is ASCII also. func encodeKeyValue(k, v string) (string, string) { if isASCII(v) { return k, v } key := k + binHdrSuffix val := base64.StdEncoding.EncodeToString([]byte(v)) return key, string(val) } // DecodeKeyValue returns the original key and value corresponding to the // encoded data in k, v. func DecodeKeyValue(k, v string) (string, string, error) { if !strings.HasSuffix(k, binHdrSuffix) { return k, v, nil } key := k[:len(k)-len(binHdrSuffix)] val, err := base64.StdEncoding.DecodeString(v) if err != nil { return "", "", err } return key, string(val), nil } // MD is a mapping from metadata keys to values. Users should use the following // two convenience functions New and Pairs to generate MD. type MD map[string]string // New creates a MD from given key-value map. func New(m map[string]string) MD { md := MD{} for k, v := range m { key, val := encodeKeyValue(k, v) md[key] = val } return md } // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } md := MD{} var k string for i, s := range kv { if i%2 == 0 { k = s continue } key, val := encodeKeyValue(k, s) md[key] = val } return md } // Len returns the number of items in md. func (md MD) Len() int { return len(md) } // Copy returns a copy of md. func (md MD) Copy() MD { out := MD{} for k, v := range md { out[k] = v } return out } type mdKey struct{} // NewContext creates a new context with md attached. func NewContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdKey{}, md) } // FromContext returns the MD in ctx if it exists. func FromContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdKey{}).(MD) return } ================================================ FILE: vendor/google.golang.org/grpc/rpc_util.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package grpc import ( "bytes" "encoding/binary" "fmt" "io" "math/rand" "os" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) // Codec defines the interface gRPC uses to encode and decode messages. type Codec interface { // Marshal returns the wire format of v. Marshal(v interface{}) ([]byte, error) // Unmarshal parses the wire format into v. Unmarshal(data []byte, v interface{}) error // String returns the name of the Codec implementation. The returned // string will be used as part of content type in transmission. String() string } // protoCodec is a Codec implemetation with protobuf. It is the default codec for gRPC. type protoCodec struct{} func (protoCodec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(v.(proto.Message)) } func (protoCodec) Unmarshal(data []byte, v interface{}) error { return proto.Unmarshal(data, v.(proto.Message)) } func (protoCodec) String() string { return "proto" } // CallOption configures a Call before it starts or extracts information from // a Call after it completes. type CallOption interface { // before is called before the call is sent to any server. If before // returns a non-nil error, the RPC fails with that error. before(*callInfo) error // after is called after the call has completed. after cannot return an // error, so any failures should be reported via output parameters. after(*callInfo) } type beforeCall func(c *callInfo) error func (o beforeCall) before(c *callInfo) error { return o(c) } func (o beforeCall) after(c *callInfo) {} type afterCall func(c *callInfo) func (o afterCall) before(c *callInfo) error { return nil } func (o afterCall) after(c *callInfo) { o(c) } // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. func Header(md *metadata.MD) CallOption { return afterCall(func(c *callInfo) { *md = c.headerMD }) } // Trailer returns a CallOptions that retrieves the trailer metadata // for a unary RPC. func Trailer(md *metadata.MD) CallOption { return afterCall(func(c *callInfo) { *md = c.trailerMD }) } // The format of the payload: compressed or not? type payloadFormat uint8 const ( compressionNone payloadFormat = iota // no compression compressionFlate // More formats ) // parser reads complelete gRPC messages from the underlying reader. type parser struct { s io.Reader } // msgFixedHeader defines the header of a gRPC message (go/grpc-wirefmt). type msgFixedHeader struct { T payloadFormat Length uint32 } // recvMsg is to read a complete gRPC message from the stream. It is blocking if // the message has not been complete yet. It returns the message and its type, // EOF is returned with nil msg and 0 pf if the entire stream is done. Other // non-nil error is returned if something is wrong on reading. 
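The Codec interface above is the seam for plugging in alternative serializers. A hedged sketch of a JSON-backed codec whose method set matches that interface; jsonCodec is a hypothetical name, and installing it server-side would go through the CustomCodec ServerOption defined later in server.go:

package main

import (
	"encoding/json"
	"fmt"
)

// jsonCodec is a hypothetical alternative to the default protoCodec. Its
// method set (Marshal, Unmarshal, String) matches the Codec interface above.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) }

func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }

// String is used as part of the content type in transmission.
func (jsonCodec) String() string { return "json" }

func main() {
	type msg struct {
		Name string `json:"name"`
	}
	c := jsonCodec{}
	b, _ := c.Marshal(msg{Name: "ping"})
	var out msg
	_ = c.Unmarshal(b, &out)
	fmt.Println(string(b), out.Name) // {"name":"ping"} ping
}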
func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { var hdr msgFixedHeader if err := binary.Read(p.s, binary.BigEndian, &hdr); err != nil { return 0, nil, err } if hdr.Length == 0 { return hdr.T, nil, nil } msg = make([]byte, int(hdr.Length)) if _, err := io.ReadFull(p.s, msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } return hdr.T, msg, nil } // encode serializes msg and prepends the message header. If msg is nil, it // generates the message header of 0 message length. func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { var buf bytes.Buffer // Write message fixed header. buf.WriteByte(uint8(pf)) var b []byte var length uint32 if msg != nil { var err error // TODO(zhaoq): optimize to reduce memory alloc and copying. b, err = c.Marshal(msg) if err != nil { return nil, err } length = uint32(len(b)) } var szHdr [4]byte binary.BigEndian.PutUint32(szHdr[:], length) buf.Write(szHdr[:]) buf.Write(b) return buf.Bytes(), nil } func recv(p *parser, c Codec, m interface{}) error { pf, d, err := p.recvMsg() if err != nil { return err } switch pf { case compressionNone: if err := c.Unmarshal(d, m); err != nil { return Errorf(codes.Internal, "grpc: %v", err) } default: return Errorf(codes.Internal, "gprc: compression is not supported yet.") } return nil } // rpcError defines the status from an RPC. type rpcError struct { code codes.Code desc string } func (e rpcError) Error() string { return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) } // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. func Code(err error) codes.Code { if err == nil { return codes.OK } if e, ok := err.(rpcError); ok { return e.code } return codes.Unknown } // Errorf returns an error containing an error code and a description; // Errorf returns nil if c is OK. func Errorf(c codes.Code, format string, a ...interface{}) error { if c == codes.OK { return nil } return rpcError{ code: c, desc: fmt.Sprintf(format, a...), } } // toRPCErr converts an error into a rpcError. func toRPCErr(err error) error { switch e := err.(type) { case transport.StreamError: return rpcError{ code: e.Code, desc: e.Desc, } case transport.ConnectionError: return rpcError{ code: codes.Internal, desc: e.Desc, } } return Errorf(codes.Unknown, "%v", err) } // convertCode converts a standard Go error into its canonical code. Note that // this is only used to translate the error returned by the server applications. func convertCode(err error) codes.Code { switch err { case nil: return codes.OK case io.EOF: return codes.OutOfRange case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: return codes.FailedPrecondition case os.ErrInvalid: return codes.InvalidArgument case context.Canceled: return codes.Canceled case context.DeadlineExceeded: return codes.DeadlineExceeded } switch { case os.IsExist(err): return codes.AlreadyExists case os.IsNotExist(err): return codes.NotFound case os.IsPermission(err): return codes.PermissionDenied } return codes.Unknown } const ( // how long to wait after the first failure before retrying baseDelay = 1.0 * time.Second // upper bound on backoff delay maxDelay = 120 * time.Second backoffFactor = 2.0 // backoff increases by this factor on each retry backoffRange = 0.4 // backoff is randomized downwards by this factor ) // backoff returns a value in [0, maxDelay] that increases exponentially with // retries, starting from baseDelay. 
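encode and parser.recvMsg above implement a simple length-prefixed framing: one payload-format byte, a 4-byte big-endian length, then the message body. A self-contained round-trip sketch of that layout (frame and readFrame are illustrative names, not part of the package):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// frame mirrors encode above: format byte, big-endian length, body.
func frame(pf byte, body []byte) []byte {
	var buf bytes.Buffer
	buf.WriteByte(pf)
	var szHdr [4]byte
	binary.BigEndian.PutUint32(szHdr[:], uint32(len(body)))
	buf.Write(szHdr[:])
	buf.Write(body)
	return buf.Bytes()
}

// readFrame mirrors parser.recvMsg above: decode the fixed header, then read
// exactly Length bytes of body.
func readFrame(r io.Reader) (pf byte, body []byte, err error) {
	var hdr struct {
		T      uint8
		Length uint32
	}
	if err := binary.Read(r, binary.BigEndian, &hdr); err != nil {
		return 0, nil, err
	}
	body = make([]byte, hdr.Length)
	if _, err := io.ReadFull(r, body); err != nil {
		return 0, nil, err
	}
	return hdr.T, body, nil
}

func main() {
	wire := frame(0, []byte("hello")) // 0 == compressionNone
	pf, body, _ := readFrame(bytes.NewReader(wire))
	fmt.Println(pf, string(body), len(wire)) // 0 hello 10
}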
func backoff(retries int) time.Duration { backoff, max := float64(baseDelay), float64(maxDelay) for backoff < max && retries > 0 { backoff = backoff * backoffFactor retries-- } if backoff > max { backoff = max } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. We just subtract up // to 40% so that we obey maxDelay. backoff -= backoff * backoffRange * rand.Float64() if backoff < 0 { return 0 } return time.Duration(backoff) } ================================================ FILE: vendor/google.golang.org/grpc/server.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package grpc import ( "errors" "fmt" "io" "net" "reflect" "strings" "sync" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) type methodHandler func(srv interface{}, ctx context.Context, codec Codec, buf []byte) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string Handler methodHandler } // ServiceDesc represents an RPC service's specification. type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. HandlerType interface{} Methods []MethodDesc Streams []StreamDesc } // service consists of the information of the server serving this service and // the methods in this service. type service struct { server interface{} // the server for service methods md map[string]*MethodDesc sd map[string]*StreamDesc } // Server is a gRPC server to serve RPC requests. 
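ServiceDesc.HandlerType is typically a nil pointer to the generated server interface, e.g. (*TestServiceServer)(nil); RegisterService below recovers the interface type with reflect and verifies that the registered implementation satisfies it. A standalone sketch of that check, using hypothetical greeter types rather than anything from the gRPC package:

package main

import (
	"fmt"
	"reflect"
)

// greeter stands in for a generated service interface; the names here are
// illustrative only.
type greeter interface {
	SayHello(name string) string
}

type greeterImpl struct{}

func (greeterImpl) SayHello(name string) string { return "hello " + name }

func main() {
	// HandlerType is usually (*someInterface)(nil); Elem() recovers the
	// interface type so Implements can be checked, which is the same
	// reflection step RegisterService performs below.
	ht := reflect.TypeOf((*greeter)(nil)).Elem()
	st := reflect.TypeOf(greeterImpl{})
	fmt.Println(st.Implements(ht)) // true
}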
type Server struct { opts options mu sync.Mutex lis map[net.Listener]bool conns map[transport.ServerTransport]bool m map[string]*service // service name -> service info } type options struct { creds credentials.Credentials codec Codec maxConcurrentStreams uint32 } // A ServerOption sets options. type ServerOption func(*options) // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. func CustomCodec(codec Codec) ServerOption { return func(o *options) { o.codec = codec } } // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { return func(o *options) { o.maxConcurrentStreams = n } } // Creds returns a ServerOption that sets credentials for server connections. func Creds(c credentials.Credentials) ServerOption { return func(o *options) { o.creds = c } } // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { var opts options for _, o := range opt { o(&opts) } if opts.codec == nil { // Set the default codec. opts.codec = protoCodec{} } return &Server{ lis: make(map[net.Listener]bool), opts: opts, conns: make(map[transport.ServerTransport]bool), m: make(map[string]*service), } } // RegisterService register a service and its implementation to the gRPC // server. Called from the IDL generated code. This must be called before // invoking Serve. func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.mu.Lock() defer s.mu.Unlock() // Does some sanity checks. if _, ok := s.m[sd.ServiceName]; ok { grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) if !st.Implements(ht) { grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) } srv := &service{ server: ss, md: make(map[string]*MethodDesc), sd: make(map[string]*StreamDesc), } for i := range sd.Methods { d := &sd.Methods[i] srv.md[d.MethodName] = d } for i := range sd.Streams { d := &sd.Streams[i] srv.sd[d.StreamName] = d } s.m[sd.ServiceName] = srv } var ( // ErrServerStopped indicates that the operation is now illegal because of // the server being stopped. ErrServerStopped = errors.New("grpc: the server has been stopped") ) // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. The service goroutines // read gRPC request and then call the registered handlers to reply to them. // Service returns when lis.Accept fails. 
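NewServer above composes functional ServerOptions and falls back to the default protoCodec when none is supplied. A minimal usage sketch, assuming the vendored import path google.golang.org/grpc and an arbitrarily chosen port; no services are registered here:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // the port is an arbitrary choice
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	// Each ServerOption is a func(*options); NewServer applies them in order.
	s := grpc.NewServer(
		grpc.MaxConcurrentStreams(128),
	)
	// Service implementations would be registered here (see
	// RegisterTestServiceServer in the generated code later in this dump)
	// before Serve starts accepting connections.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}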
func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() if s.lis == nil { s.mu.Unlock() return ErrServerStopped } s.lis[lis] = true s.mu.Unlock() defer func() { lis.Close() s.mu.Lock() delete(s.lis, lis) s.mu.Unlock() }() for { c, err := lis.Accept() if err != nil { return err } if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { c, err = creds.ServerHandshake(c) if err != nil { grpclog.Println("grpc: Server.Serve failed to complete security handshake.") continue } } s.mu.Lock() if s.conns == nil { s.mu.Unlock() c.Close() return nil } st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams) if err != nil { s.mu.Unlock() c.Close() grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) continue } s.conns[st] = true s.mu.Unlock() go func() { st.HandleStreams(func(stream *transport.Stream) { s.handleStream(st, stream) }) s.mu.Lock() delete(s.conns, st) s.mu.Unlock() }() } } func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error { p, err := encode(s.opts.codec, msg, pf) if err != nil { // This typically indicates a fatal issue (e.g., memory // corruption or hardware faults) the application program // cannot handle. // // TODO(zhaoq): There exist other options also such as only closing the // faulty stream locally and remotely (Other streams can keep going). Find // the optimal option. grpclog.Fatalf("grpc: Server failed to encode response %v", err) } return t.Write(stream, p, opts) } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) { p := &parser{s: stream} for { pf, req, err := p.recvMsg() if err == io.EOF { // The entire stream is done (for unary RPC only). return } if err != nil { switch err := err.(type) { case transport.ConnectionError: // Nothing to do here. 
case transport.StreamError: if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) } return } switch pf { case compressionNone: statusCode := codes.OK statusDesc := "" reply, appErr := md.Handler(srv.server, stream.Context(), s.opts.codec, req) if appErr != nil { if err, ok := appErr.(rpcError); ok { statusCode = err.code statusDesc = err.desc } else { statusCode = convertCode(appErr) statusDesc = appErr.Error() } if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) } return } opts := &transport.Options{ Last: true, Delay: false, } if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { if _, ok := err.(transport.ConnectionError); ok { return } if e, ok := err.(transport.StreamError); ok { statusCode = e.Code statusDesc = e.Desc } else { statusCode = codes.Unknown statusDesc = err.Error() } } t.WriteStatus(stream, statusCode, statusDesc) default: panic(fmt.Sprintf("payload format to be supported: %d", pf)) } } } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) { ss := &serverStream{ t: t, s: stream, p: &parser{s: stream}, codec: s.opts.codec, } if appErr := sd.Handler(srv.server, ss); appErr != nil { if err, ok := appErr.(rpcError); ok { ss.statusCode = err.code ss.statusDesc = err.desc } else { ss.statusCode = convertCode(appErr) ss.statusDesc = appErr.Error() } } t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } return } service := sm[:pos] method := sm[pos+1:] srv, ok := s.m[service] if !ok { if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } return } // Unary RPC or Streaming RPC? if md, ok := srv.md[method]; ok { s.processUnaryRPC(t, stream, srv, md) return } if sd, ok := srv.sd[method]; ok { s.processStreamingRPC(t, stream, srv, sd) return } if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } } // Stop stops the gRPC server. Once Stop returns, the server stops accepting // connection requests and closes all the connected connections. func (s *Server) Stop() { s.mu.Lock() listeners := s.lis s.lis = nil cs := s.conns s.conns = nil s.mu.Unlock() for lis := range listeners { lis.Close() } for c := range cs { c.Close() } } // TestingCloseConns closes all exiting transports but keeps s.lis accepting new // connections. This is for test only now. func (s *Server) TestingCloseConns() { s.mu.Lock() for c := range s.conns { c.Close() } s.conns = make(map[transport.ServerTransport]bool) s.mu.Unlock() } // SendHeader sends header metadata. It may be called at most once from a unary // RPC handler. The ctx is the RPC handler's Context or one derived from it. 
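Server.handleStream above routes an incoming stream by splitting the full method name on its last '/' into a registered service name and a method within that service. A standalone sketch of that routing step (splitMethod is an illustrative name):

package main

import (
	"fmt"
	"strings"
)

// splitMethod mirrors the routing step in Server.handleStream above: the full
// method name "/package.Service/Method" is split on the last '/' into the
// registered service name and the method within it.
func splitMethod(sm string) (service, method string, ok bool) {
	if sm != "" && sm[0] == '/' {
		sm = sm[1:]
	}
	pos := strings.LastIndex(sm, "/")
	if pos == -1 {
		return "", "", false
	}
	return sm[:pos], sm[pos+1:], true
}

func main() {
	svc, m, ok := splitMethod("/grpc.testing.TestService/UnaryCall")
	fmt.Println(svc, m, ok) // grpc.testing.TestService UnaryCall true
}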
func SendHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } stream, ok := transport.StreamFromContext(ctx) if !ok { return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) } t := stream.ServerTransport() if t == nil { grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) } return t.WriteHeader(stream, md) } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // It may be called at most once from a unary RPC handler. The ctx is the RPC // handler's Context or one derived from it. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } stream, ok := transport.StreamFromContext(ctx) if !ok { return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetTrailer(md) } ================================================ FILE: vendor/google.golang.org/grpc/stream.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package grpc import ( "errors" "io" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) type streamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. type StreamDesc struct { StreamName string Handler streamHandler // At least one of these is true. ServerStreams bool ClientStreams bool } // Stream defines the common interface a client or server stream has to satisfy. type Stream interface { // Context returns the context for this stream. Context() context.Context // SendMsg blocks until it sends m, the stream is done or the stream // breaks. // On error, it aborts the stream and returns an RPC status on client // side. On server side, it simply returns the error to the caller. // SendMsg is called by generated code. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. 
On client side, it returns io.EOF when the stream is done. On // any other error, it aborts the streama nd returns an RPC status. On // server side, it simply returns the error to the caller. RecvMsg(m interface{}) error } // ClientStream defines the interface a client stream has to satify. type ClientStream interface { // Header returns the header metedata received from the server if there // is any. It blocks if the metadata is not ready to read. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server. It must be called // after stream.Recv() returns non-nil error (including io.EOF) for // bi-directional streaming and server streaming or stream.CloseAndRecv() // returns for client streaming in order to receive trailer metadata if // present. Otherwise, it could returns an empty MD even though trailer // is present. Trailer() metadata.MD // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. CloseSend() error Stream } // NewClientStream creates a new Stream for the client side. This is called // by generated code. func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { // TODO(zhaoq): CallOption is omitted. Add support when it is needed. callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, } t, _, err := cc.wait(ctx, 0) if err != nil { return nil, toRPCErr(err) } s, err := t.NewStream(ctx, callHdr) if err != nil { return nil, toRPCErr(err) } return &clientStream{ t: t, s: s, p: &parser{s: s}, desc: desc, codec: cc.dopts.codec, }, nil } // clientStream implements a client side Stream. type clientStream struct { t transport.ClientTransport s *transport.Stream p *parser desc *StreamDesc codec Codec } func (cs *clientStream) Context() context.Context { return cs.s.Context() } func (cs *clientStream) Header() (metadata.MD, error) { m, err := cs.s.Header() if err != nil { if _, ok := err.(transport.ConnectionError); !ok { cs.t.CloseStream(cs.s, err) } } return m, err } func (cs *clientStream) Trailer() metadata.MD { return cs.s.Trailer() } func (cs *clientStream) SendMsg(m interface{}) (err error) { defer func() { if err == nil || err == io.EOF { return } if _, ok := err.(transport.ConnectionError); !ok { cs.t.CloseStream(cs.s, err) } err = toRPCErr(err) }() out, err := encode(cs.codec, m, compressionNone) if err != nil { return transport.StreamErrorf(codes.Internal, "grpc: %v", err) } return cs.t.Write(cs.s, out, &transport.Options{Last: false}) } func (cs *clientStream) RecvMsg(m interface{}) (err error) { err = recv(cs.p, cs.codec, m) if err == nil { if !cs.desc.ClientStreams || cs.desc.ServerStreams { return } // Special handling for client streaming rpc. err = recv(cs.p, cs.codec, m) cs.t.CloseStream(cs.s, err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { return nil } return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) } return toRPCErr(err) } if _, ok := err.(transport.ConnectionError); !ok { cs.t.CloseStream(cs.s, err) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { // Returns io.EOF to indicate the end of the stream. 
return } return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) } return toRPCErr(err) } func (cs *clientStream) CloseSend() (err error) { err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) if err == nil || err == io.EOF { return } if _, ok := err.(transport.ConnectionError); !ok { cs.t.CloseStream(cs.s, err) } err = toRPCErr(err) return } // ServerStream defines the interface a server stream has to satisfy. type ServerStream interface { // SendHeader sends the header metadata. It should not be called // after SendProto. It fails if called multiple times or if // called after SendProto. SendHeader(metadata.MD) error // SetTrailer sets the trailer metadata which will be sent with the // RPC status. SetTrailer(metadata.MD) Stream } // serverStream implements a server side Stream. type serverStream struct { t transport.ServerTransport s *transport.Stream p *parser codec Codec statusCode codes.Code statusDesc string } func (ss *serverStream) Context() context.Context { return ss.s.Context() } func (ss *serverStream) SendHeader(md metadata.MD) error { return ss.t.WriteHeader(ss.s, md) } func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } ss.s.SetTrailer(md) return } func (ss *serverStream) SendMsg(m interface{}) error { out, err := encode(ss.codec, m, compressionNone) if err != nil { err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) return err } return ss.t.Write(ss.s, out, &transport.Options{Last: false}) } func (ss *serverStream) RecvMsg(m interface{}) error { return recv(ss.p, ss.codec, m) } ================================================ FILE: vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go ================================================ // Code generated by protoc-gen-go. // source: perf.proto // DO NOT EDIT! /* Package codec_perf is a generated protocol buffer package. It is generated from these files: perf.proto It has these top-level messages: Buffer */ package codec_perf import proto "github.com/golang/protobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = math.Inf // Buffer is a message that contains a body of bytes that is used to exercise // encoding and decoding overheads. type Buffer struct { Body []byte `protobuf:"bytes,1,opt,name=body" json:"body,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Buffer) Reset() { *m = Buffer{} } func (m *Buffer) String() string { return proto.CompactTextString(m) } func (*Buffer) ProtoMessage() {} func (m *Buffer) GetBody() []byte { if m != nil { return m.Body } return nil } func init() { } ================================================ FILE: vendor/google.golang.org/grpc/test/codec_perf/perf.proto ================================================ // Messages used for performance tests that may not reference grpc directly for // reasons of import cycles. syntax = "proto2"; package codec.perf; // Buffer is a message that contains a body of bytes that is used to exercise // encoding and decoding overheads. message Buffer { optional bytes body = 1; } ================================================ FILE: vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go ================================================ // Code generated by protoc-gen-go. // source: src/google.golang.org/grpc/test/grpc_testing/test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. 
It is generated from these files: src/google.golang.org/grpc/test/grpc_testing/test.proto It has these top-level messages: Empty Payload SimpleRequest SimpleResponse StreamingInputCallRequest StreamingInputCallResponse ResponseParameters StreamingOutputCallRequest StreamingOutputCallResponse */ package grpc_testing import proto "github.com/golang/protobuf/proto" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = math.Inf // The type of payload that should be returned. type PayloadType int32 const ( // Compressable text format. PayloadType_COMPRESSABLE PayloadType = 0 // Uncompressable binary format. PayloadType_UNCOMPRESSABLE PayloadType = 1 // Randomly chosen from all other formats defined in this enum. PayloadType_RANDOM PayloadType = 2 ) var PayloadType_name = map[int32]string{ 0: "COMPRESSABLE", 1: "UNCOMPRESSABLE", 2: "RANDOM", } var PayloadType_value = map[string]int32{ "COMPRESSABLE": 0, "UNCOMPRESSABLE": 1, "RANDOM": 2, } func (x PayloadType) Enum() *PayloadType { p := new(PayloadType) *p = x return p } func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } func (x *PayloadType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") if err != nil { return err } *x = PayloadType(value) return nil } type Empty struct { XXX_unrecognized []byte `json:"-"` } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} // A block of data, to simply increase gRPC message size. type Payload struct { // The type of data in body. Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} func (m *Payload) GetType() PayloadType { if m != nil && m.Type != nil { return *m.Type } return PayloadType_COMPRESSABLE } func (m *Payload) GetBody() []byte { if m != nil { return m.Body } return nil } // Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` // Whether SimpleResponse should include username. FillUsername *bool `protobuf:"varint,4,opt,name=fill_username" json:"fill_username,omitempty"` // Whether SimpleResponse should include OAuth scope. 
FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope" json:"fill_oauth_scope,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} func (m *SimpleRequest) GetResponseType() PayloadType { if m != nil && m.ResponseType != nil { return *m.ResponseType } return PayloadType_COMPRESSABLE } func (m *SimpleRequest) GetResponseSize() int32 { if m != nil && m.ResponseSize != nil { return *m.ResponseSize } return 0 } func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleRequest) GetFillUsername() bool { if m != nil && m.FillUsername != nil { return *m.FillUsername } return false } func (m *SimpleRequest) GetFillOauthScope() bool { if m != nil && m.FillOauthScope != nil { return *m.FillOauthScope } return false } // Unary response, as configured by the request. type SimpleResponse struct { // Payload to increase message size. Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` // The user the request came from, for verifying authentication was // successful when the client expected it. Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` // OAuth scope. OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope" json:"oauth_scope,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } func (*SimpleResponse) ProtoMessage() {} func (m *SimpleResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleResponse) GetUsername() string { if m != nil && m.Username != nil { return *m.Username } return "" } func (m *SimpleResponse) GetOauthScope() string { if m != nil && m.OauthScope != nil { return *m.OauthScope } return "" } // Client-streaming request. type StreamingInputCallRequest struct { // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallRequest) ProtoMessage() {} func (m *StreamingInputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Client-streaming response. type StreamingInputCallResponse struct { // Aggregated size of payloads received from the client. AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size" json:"aggregated_payload_size,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallResponse) ProtoMessage() {} func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { if m != nil && m.AggregatedPayloadSize != nil { return *m.AggregatedPayloadSize } return 0 } // Configuration for a particular response. type ResponseParameters struct { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. 
Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` // Desired interval between consecutive responses in the response stream in // microseconds. IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us" json:"interval_us,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } func (*ResponseParameters) ProtoMessage() {} func (m *ResponseParameters) GetSize() int32 { if m != nil && m.Size != nil { return *m.Size } return 0 } func (m *ResponseParameters) GetIntervalUs() int32 { if m != nil && m.IntervalUs != nil { return *m.IntervalUs } return 0 } // Server-streaming request. type StreamingOutputCallRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Configuration for each expected response message. ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters" json:"response_parameters,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallRequest) ProtoMessage() {} func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { if m != nil && m.ResponseType != nil { return *m.ResponseType } return PayloadType_COMPRESSABLE } func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { if m != nil { return m.ResponseParameters } return nil } func (m *StreamingOutputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Server-streaming response, as configured by the request and parameters. type StreamingOutputCallResponse struct { // Payload to increase response size. Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallResponse) ProtoMessage() {} func (m *StreamingOutputCallResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) } // Client API for TestService service type TestServiceClient interface { // One empty request followed by one empty response. EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. 
StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) } type testServiceClient struct { cc *grpc.ClientConn } func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { return &testServiceClient{cc} } func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) if err != nil { return nil, err } x := &testServiceStreamingOutputCallClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type TestService_StreamingOutputCallClient interface { Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceStreamingOutputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) 
if err != nil { return nil, err } x := &testServiceStreamingInputCallClient{stream} return x, nil } type TestService_StreamingInputCallClient interface { Send(*StreamingInputCallRequest) error CloseAndRecv() (*StreamingInputCallResponse, error) grpc.ClientStream } type testServiceStreamingInputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamingInputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) if err != nil { return nil, err } x := &testServiceFullDuplexCallClient{stream} return x, nil } type TestService_FullDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceFullDuplexCallClient struct { grpc.ClientStream } func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) if err != nil { return nil, err } x := &testServiceHalfDuplexCallClient{stream} return x, nil } type TestService_HalfDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceHalfDuplexCallClient struct { grpc.ClientStream } func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for TestService service type TestServiceServer interface { // One empty request followed by one empty response. EmptyCall(context.Context, *Empty) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(TestService_StreamingInputCallServer) error // A sequence of requests with each request served by the server immediately. 
// As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(TestService_FullDuplexCallServer) error // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. HalfDuplexCall(TestService_HalfDuplexCallServer) error } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(Empty) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(TestServiceServer).EmptyCall(ctx, in) if err != nil { return nil, err } return out, nil } func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { in := new(SimpleRequest) if err := codec.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(TestServiceServer).UnaryCall(ctx, in) if err != nil { return nil, err } return out, nil } func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(StreamingOutputCallRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) } type TestService_StreamingOutputCallServer interface { Send(*StreamingOutputCallResponse) error grpc.ServerStream } type testServiceStreamingOutputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) } type TestService_StreamingInputCallServer interface { SendAndClose(*StreamingInputCallResponse) error Recv() (*StreamingInputCallRequest, error) grpc.ServerStream } type testServiceStreamingInputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { m := new(StreamingInputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) } type TestService_FullDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceFullDuplexCallServer struct { grpc.ServerStream } func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m := new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) } type 
TestService_HalfDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceHalfDuplexCallServer struct { grpc.ServerStream } func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m := new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "EmptyCall", Handler: _TestService_EmptyCall_Handler, }, { MethodName: "UnaryCall", Handler: _TestService_UnaryCall_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingOutputCall", Handler: _TestService_StreamingOutputCall_Handler, ServerStreams: true, }, { StreamName: "StreamingInputCall", Handler: _TestService_StreamingInputCall_Handler, ClientStreams: true, }, { StreamName: "FullDuplexCall", Handler: _TestService_FullDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "HalfDuplexCall", Handler: _TestService_HalfDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, }, } ================================================ FILE: vendor/google.golang.org/grpc/test/grpc_testing/test.proto ================================================ // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. syntax = "proto2"; package grpc.testing; message Empty {} // The type of payload that should be returned. enum PayloadType { // Compressable text format. COMPRESSABLE = 0; // Uncompressable binary format. UNCOMPRESSABLE = 1; // Randomly chosen from all other formats defined in this enum. RANDOM = 2; } // A block of data, to simply increase gRPC message size. message Payload { // The type of data in body. optional PayloadType type = 1; // Primary contents of payload. optional bytes body = 2; } // Unary request. message SimpleRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. optional PayloadType response_type = 1; // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. optional int32 response_size = 2; // Optional input payload sent along with the request. optional Payload payload = 3; // Whether SimpleResponse should include username. optional bool fill_username = 4; // Whether SimpleResponse should include OAuth scope. optional bool fill_oauth_scope = 5; } // Unary response, as configured by the request. message SimpleResponse { // Payload to increase message size. optional Payload payload = 1; // The user the request came from, for verifying authentication was // successful when the client expected it. optional string username = 2; // OAuth scope. optional string oauth_scope = 3; } // Client-streaming request. message StreamingInputCallRequest { // Optional input payload sent along with the request. optional Payload payload = 1; // Not expecting any payload from the response. } // Client-streaming response. message StreamingInputCallResponse { // Aggregated size of payloads received from the client. optional int32 aggregated_payload_size = 1; } // Configuration for a particular response. 
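The ResponseParameters message defined next uses proto2 optional fields, which the generated code earlier in this dump maps to pointer fields with zero-value-returning getters. A small sketch using the generated types from test.pb.go; the import alias testpb and the field values are illustrative:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	testpb "google.golang.org/grpc/test/grpc_testing"
)

func main() {
	// proto2 optional fields become pointer fields in Go; the generated
	// getters return the zero value when a field is unset.
	unset := &testpb.ResponseParameters{}
	set := &testpb.ResponseParameters{
		Size:       proto.Int32(1024),
		IntervalUs: proto.Int32(500),
	}
	fmt.Println(unset.GetSize(), set.GetSize(), set.GetIntervalUs()) // 0 1024 500
}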
message ResponseParameters { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. optional int32 size = 1; // Desired interval between consecutive responses in the response stream in // microseconds. optional int32 interval_us = 2; } // Server-streaming request. message StreamingOutputCallRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. optional PayloadType response_type = 1; // Configuration for each expected response message. repeated ResponseParameters response_parameters = 2; // Optional input payload sent along with the request. optional Payload payload = 3; } // Server-streaming response, as configured by the request and parameters. message StreamingOutputCallResponse { // Payload to increase response size. optional Payload payload = 1; } // A simple service to test the various types of RPCs and experiment with // performance with various types of payload. service TestService { // One empty request followed by one empty response. rpc EmptyCall(Empty) returns (Empty); // One request followed by one response. // The server returns the client payload as-is. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. rpc StreamingOutputCall(StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. rpc StreamingInputCall(stream StreamingInputCallRequest) returns (StreamingInputCallResponse); // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. rpc FullDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. rpc HalfDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); } ================================================ FILE: vendor/google.golang.org/grpc/transport/control.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package transport import ( "fmt" "sync" "github.com/bradfitz/http2" ) const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. initialWindowSize = defaultWindowSize // for an RPC initialConnWindowSize = defaultWindowSize * 16 // for a connection ) // The following defines various control items which could flow through // the control buffer of transport. They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. type windowUpdate struct { streamID uint32 increment uint32 } func (windowUpdate) isItem() bool { return true } type settings struct { ack bool setting []http2.Setting } func (settings) isItem() bool { return true } type resetStream struct { streamID uint32 code http2.ErrCode } func (resetStream) isItem() bool { return true } type flushIO struct { } func (flushIO) isItem() bool { return true } type ping struct { ack bool } func (ping) isItem() bool { return true } // quotaPool is a pool which accumulates the quota and sends it to acquire() // when it is available. type quotaPool struct { c chan int mu sync.Mutex quota int } // newQuotaPool creates a quotaPool which has quota q available to consume. func newQuotaPool(q int) *quotaPool { qb := &quotaPool{c: make(chan int, 1)} qb.c <- q return qb } // add adds n to the available quota and tries to send it on acquire. func (qb *quotaPool) add(n int) { qb.mu.Lock() defer qb.mu.Unlock() qb.quota += n if qb.quota <= 0 { return } select { case qb.c <- qb.quota: qb.quota = 0 default: } } // cancel cancels the pending quota sent on acquire, if any. func (qb *quotaPool) cancel() { qb.mu.Lock() defer qb.mu.Unlock() select { case n := <-qb.c: qb.quota += n default: } } // reset cancels the pending quota sent on acquired, incremented by v and sends // it back on acquire. func (qb *quotaPool) reset(v int) { qb.mu.Lock() defer qb.mu.Unlock() select { case n := <-qb.c: qb.quota += n default: } qb.quota += v if qb.quota <= 0 { return } select { case qb.c <- qb.quota: qb.quota = 0 default: } } // acquire returns the channel on which available quota amounts are sent. func (qb *quotaPool) acquire() <-chan int { return qb.c } // inFlow deals with inbound flow control type inFlow struct { // The inbound flow control limit for pending data. limit uint32 // conn points to the shared connection-level inFlow that is shared // by all streams on that conn. It is nil for the inFlow on the conn // directly. conn *inFlow mu sync.Mutex // pendingData is the overall data which have been received but not been // consumed by applications. pendingData uint32 // The amount of data the application has consumed but grpc has not sent // window update for them. 
Used to reduce window update frequency. pendingUpdate uint32 } // onData is invoked when some data frame is received. It increments not only its // own pendingData but also that of the associated connection-level flow. func (f *inFlow) onData(n uint32) error { if n == 0 { return nil } f.mu.Lock() defer f.mu.Unlock() if f.pendingData+f.pendingUpdate+n > f.limit { return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit) } if f.conn != nil { if err := f.conn.onData(n); err != nil { return ConnectionErrorf("%v", err) } } f.pendingData += n return nil } // connOnRead updates the connection level states when the application consumes data. func (f *inFlow) connOnRead(n uint32) uint32 { if n == 0 || f.conn != nil { return 0 } f.mu.Lock() defer f.mu.Unlock() f.pendingData -= n f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { ret := f.pendingUpdate f.pendingUpdate = 0 return ret } return 0 } // onRead is invoked when the application reads the data. It returns the window updates // for both stream and connection level. func (f *inFlow) onRead(n uint32) (swu, cwu uint32) { if n == 0 { return } f.mu.Lock() defer f.mu.Unlock() if f.pendingData == 0 { // pendingData has been adjusted by restoreConn. return } f.pendingData -= n f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { swu = f.pendingUpdate f.pendingUpdate = 0 } cwu = f.conn.connOnRead(n) return } // restoreConn is invoked when a stream is terminated. It removes its stake in // the connection-level flow and resets its own state. func (f *inFlow) restoreConn() uint32 { if f.conn == nil { return 0 } f.mu.Lock() defer f.mu.Unlock() n := f.pendingData f.pendingData = 0 f.pendingUpdate = 0 return f.conn.connOnRead(n) } ================================================ FILE: vendor/google.golang.org/grpc/transport/http2_client.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
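Before the client transport file, a sketch of the inFlow accounting just defined (illustrative only, written as if it lived in package transport; the limits and byte counts are made up): onData records bytes arriving on a stream and cascades into the connection-level inFlow, while onRead only reports a window update once the consumed-but-unannounced bytes reach a quarter of the corresponding limit.

func exampleInFlowAccounting() {
	connFC := &inFlow{limit: 1024}
	streamFC := &inFlow{limit: 256, conn: connFC}
	_ = streamFC.onData(100)         // 100 bytes received; tracked on both stream and connection
	swu, cwu := streamFC.onRead(100) // the application has consumed those 100 bytes
	// swu == 100 because 100 >= 256/4: a stream-level WINDOW_UPDATE is due.
	// cwu == 0 because 100 < 1024/4: the connection-level update is still deferred.
	_, _ = swu, cwu
}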
* */ package transport import ( "bytes" "errors" "io" "math" "net" "sync" "time" "github.com/bradfitz/http2" "github.com/bradfitz/http2/hpack" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { target string // server name/addr conn net.Conn // underlying communication channel nextID uint32 // the next stream ID to be used // writableChan synchronizes write access to the transport. // A writer acquires the write lock by sending a value on writableChan // and releases it by receiving from writableChan. writableChan chan int // shutdownChan is closed when Close is called. // Blocking operations should select on shutdownChan to avoid // blocking forever after Close. // TODO(zhaoq): Maybe have a channel context? shutdownChan chan struct{} // errorChan is closed to notify the I/O error to the caller. errorChan chan struct{} framer *framer hBuf *bytes.Buffer // the buffer for HPACK encoding hEnc *hpack.Encoder // HPACK encoder // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. controlBuf *recvBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool // streamsQuota limits the max number of concurrent streams. streamsQuota *quotaPool // The scheme used: https if TLS is on, http otherwise. scheme string authCreds []credentials.Credentials mu sync.Mutex // guard the following variables state transportState // the state of underlying connection activeStreams map[uint32]*Stream // The max number of concurrent streams maxStreams int // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 } // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) { if opts.Dialer == nil { // Set the default Dialer. opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("tcp", addr, timeout) } } scheme := "http" startT := time.Now() timeout := opts.Timeout conn, connErr := opts.Dialer(addr, timeout) if connErr != nil { return nil, ConnectionErrorf("transport: %v", connErr) } for _, c := range opts.AuthOptions { if ccreds, ok := c.(credentials.TransportAuthenticator); ok { scheme = "https" // TODO(zhaoq): Now the first TransportAuthenticator is used if there are // multiple ones provided. Revisit this if it is not appropriate. Probably // place the ClientTransport construction into a separate function to make // things clear. if timeout > 0 { timeout -= time.Since(startT) } conn, connErr = ccreds.ClientHandshake(addr, conn, timeout) break } } if connErr != nil { return nil, ConnectionErrorf("transport: %v", connErr) } defer func() { if err != nil { conn.Close() } }() // Send connection preface to server. 
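For orientation, this is roughly how a caller hands a dialer and timeout to this transport via ConnectOptions (an illustrative, self-contained sketch; the address is made up, and with no TransportAuthenticator in AuthOptions the handshake above keeps the plain "http" scheme):

package main

import (
	"net"
	"time"

	"google.golang.org/grpc/transport"
)

func main() {
	opts := &transport.ConnectOptions{
		Timeout: 5 * time.Second,
		// Plain TCP dialer, mirroring the default installed above.
		Dialer: func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("tcp", addr, timeout)
		},
	}
	ct, err := transport.NewClientTransport("127.0.0.1:50051", opts) // hypothetical address
	if err != nil {
		return
	}
	defer ct.Close()
}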
n, err := conn.Write(clientPreface) if err != nil { return nil, ConnectionErrorf("transport: %v", err) } if n != len(clientPreface) { return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } framer := newFramer(conn) if initialWindowSize != defaultWindowSize { err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) } else { err = framer.writeSettings(true) } if err != nil { return nil, ConnectionErrorf("transport: %v", err) } // Adjust the connection flow control window if needed. if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { if err := framer.writeWindowUpdate(true, 0, delta); err != nil { return nil, ConnectionErrorf("transport: %v", err) } } var buf bytes.Buffer t := &http2Client{ target: addr, conn: conn, // The client initiated stream id is odd starting from 1. nextID: 1, writableChan: make(chan int, 1), shutdownChan: make(chan struct{}), errorChan: make(chan struct{}), framer: framer, hBuf: &buf, hEnc: hpack.NewEncoder(&buf), controlBuf: newRecvBuffer(), fc: &inFlow{limit: initialConnWindowSize}, sendQuotaPool: newQuotaPool(defaultWindowSize), scheme: scheme, state: reachable, activeStreams: make(map[uint32]*Stream), authCreds: opts.AuthOptions, maxStreams: math.MaxInt32, streamSendQuota: defaultWindowSize, } go t.controller() t.writableChan <- 0 // Start the reader goroutine for incoming message. The threading model // on receiving is that each transport has a dedicated goroutine which // reads HTTP2 frame from network. Then it dispatches the frame to the // corresponding stream entity. go t.reader() return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr, sq bool) *Stream { fc := &inFlow{ limit: initialWindowSize, conn: t.fc, } // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ id: t.nextID, method: callHdr.Method, buf: newRecvBuffer(), updateStreams: sq, fc: fc, sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), headerChan: make(chan struct{}), } t.nextID += 2 s.windowHandler = func(n int) { t.updateWindow(s, uint32(n)) } // Make a stream be able to cancel the pending operations by itself. s.ctx, s.cancel = context.WithCancel(ctx) s.dec = &recvBufferReader{ ctx: s.ctx, recv: s.buf, } return s } // NewStream creates a stream and register it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { // Record the timeout value on the context. var timeout time.Duration if dl, ok := ctx.Deadline(); ok { timeout = dl.Sub(time.Now()) if timeout <= 0 { return nil, ContextErr(context.DeadlineExceeded) } } authData := make(map[string]string) for _, c := range t.authCreds { data, err := c.GetRequestMetadata(ctx) if err != nil { return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) } for k, v := range data { authData[k] = v } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() return nil, ErrConnClosing } checkStreamsQuota := t.streamsQuota != nil t.mu.Unlock() if checkStreamsQuota { sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) if err != nil { return nil, err } // Returns the quota balance back. if sq > 1 { t.streamsQuota.add(sq - 1) } } if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { // t.streamsQuota will be updated when t.CloseStream is invoked. 
return nil, err } t.mu.Lock() s := t.newStream(ctx, callHdr, checkStreamsQuota) t.activeStreams[s.id] = s t.mu.Unlock() // HPACK encodes various headers. Note that once WriteField(...) is // called, the corresponding headers/continuation frame has to be sent // because hpack.Encoder is stateful. t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) if timeout > 0 { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) } for k, v := range authData { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } var ( hasMD bool endHeaders bool ) if md, ok := metadata.FromContext(ctx); ok { hasMD = true for k, v := range md { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } } first := true // Sends the headers in a single batch even when they span multiple frames. for !endHeaders { size := t.hBuf.Len() if size > http2MaxFrameLen { size = http2MaxFrameLen } else { endHeaders = true } if first { // Sends a HeadersFrame to server to start a new stream. p := http2.HeadersFrameParam{ StreamID: s.id, BlockFragment: t.hBuf.Next(size), EndStream: false, EndHeaders: endHeaders, } // Do a force flush for the buffered frames iff it is the last headers frame // and there is header metadata to be sent. Otherwise, there is flushing until // the corresponding data frame is written. err = t.framer.writeHeaders(hasMD && endHeaders, p) first = false } else { // Sends Continuation frames for the leftover headers. err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size)) } if err != nil { t.notifyError(err) return nil, ConnectionErrorf("transport: %v", err) } } t.writableChan <- 0 return s, nil } // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. func (t *http2Client) CloseStream(s *Stream, err error) { t.mu.Lock() delete(t.activeStreams, s.id) t.mu.Unlock() if s.updateStreams { t.streamsQuota.add(1) } s.mu.Lock() if q := s.fc.restoreConn(); q > 0 { t.controlBuf.put(&windowUpdate{0, q}) } if s.state == streamDone { s.mu.Unlock() return } if !s.headerDone { close(s.headerChan) s.headerDone = true } s.state = streamDone s.mu.Unlock() // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), the caller needs // to call cancel on the stream to interrupt the blocking on // other goroutines. s.cancel() if _, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) } } // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. func (t *http2Client) Close() (err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return errors.New("transport: Close() was already called") } t.state = closing t.mu.Unlock() close(t.shutdownChan) err = t.conn.Close() t.mu.Lock() streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() // Notify all active streams. 
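As a standalone illustration of the header block NewStream encodes above, here is the same field sequence written against the vendored hpack package (the method, authority and timeout values are made up):

package main

import (
	"bytes"
	"fmt"

	"github.com/bradfitz/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	fields := []hpack.HeaderField{
		{Name: ":method", Value: "POST"},
		{Name: ":scheme", Value: "http"},
		{Name: ":path", Value: "/foo.Bar/Baz"},
		{Name: ":authority", Value: "example.com"},
		{Name: "content-type", Value: "application/grpc"},
		{Name: "te", Value: "trailers"},
		{Name: "grpc-timeout", Value: "1000000u"}, // one second, in the encoding used by timeoutEncode
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}
	fmt.Printf("HPACK header block is %d bytes\n", buf.Len())
}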
for _, s := range streams { s.mu.Lock() if !s.headerDone { close(s.headerChan) s.headerDone = true } s.mu.Unlock() s.write(recvMsg{err: ErrConnClosing}) } return } // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. // TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later // if it improves the performance. func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { r := bytes.NewBuffer(data) for { var p []byte if r.Len() > 0 { size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { if _, ok := err.(StreamError); ok { t.sendQuotaPool.cancel() } return err } if sq < size { size = sq } if tq < size { size = tq } p = r.Next(size) ps := len(p) if ps < sq { // Overbooked stream quota. Return it back. s.sendQuotaPool.add(sq - ps) } if ps < tq { // Overbooked transport quota. Return it back. t.sendQuotaPool.add(tq - ps) } } var ( endStream bool forceFlush bool ) if opts.Last && r.Len() == 0 { endStream = true } // Indicate there is a writer who is about to write a data frame. t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the transport. if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues // a flush request to controlBuf instead of flushing directly // in order to avoid the race with other writing or flushing. t.controlBuf.put(&flushIO{}) } return err } if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { // Do a force flush iff this is last frame for the entire gRPC message // and the caller is the only writer at this moment. forceFlush = true } // If WriteData fails, all the pending streams will be handled // by http2Client.Close(). No explicit CloseStream() needs to be // invoked. if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { t.notifyError(err) return ConnectionErrorf("transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() } t.writableChan <- 0 if r.Len() == 0 { break } } if !opts.Last { return nil } s.mu.Lock() if s.state != streamDone { if s.state == streamReadDone { s.state = streamDone } else { s.state = streamWriteDone } } s.mu.Unlock() return nil } func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { // The transport is closing. return nil, false } if s, ok := t.activeStreams[f.Header().StreamID]; ok { return s, true } return nil, false } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Client) updateWindow(s *Stream, n uint32) { swu, cwu := s.fc.onRead(n) if swu > 0 { t.controlBuf.put(&windowUpdate{s.id, swu}) } if cwu > 0 { t.controlBuf.put(&windowUpdate{0, cwu}) } } func (t *http2Client) handleData(f *http2.DataFrame) { // Select the right stream to dispatch. 
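The sizing logic in Write above reduces to taking the minimum of the maximum frame length, the two acquired quotas, and the bytes left to send; a simplified restatement of that arithmetic (a sketch, not the vendored code):

// frameChunk mirrors the size computation in http2Client.Write: a DATA frame
// carries at most http2MaxFrameLen bytes and never more than the stream or
// transport quota just acquired; Write returns any unused quota afterwards.
func frameChunk(remaining, streamQuota, transportQuota int) int {
	size := 16384 // http2MaxFrameLen
	if streamQuota < size {
		size = streamQuota
	}
	if transportQuota < size {
		size = transportQuota
	}
	if remaining < size {
		size = remaining
	}
	return size
}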
s, ok := t.getStream(f) if !ok { return } size := len(f.Data()) if err := s.fc.onData(uint32(size)); err != nil { if _, ok := err.(ConnectionError); ok { t.notifyError(err) return } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() return } s.state = streamDone s.statusCode = codes.Internal s.statusDesc = err.Error() s.mu.Unlock() s.write(recvMsg{err: io.EOF}) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? data := make([]byte, size) copy(data, f.Data()) s.write(recvMsg{data: data}) } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { s, ok := t.getStream(f) if !ok { return } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() return } s.state = streamDone s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) } s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } func (t *http2Client) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } f.ForeachSetting(func(s http2.Setting) error { if v, ok := f.Value(s.ID); ok { switch s.ID { case http2.SettingMaxConcurrentStreams: // TODO(zhaoq): This is a hack to avoid significant refactoring of the // code to deal with the unrealistic int32 overflow. Probably will try // to find a better way to handle this later. if v > math.MaxInt32 { v = math.MaxInt32 } t.mu.Lock() reset := t.streamsQuota != nil if !reset { t.streamsQuota = newQuotaPool(int(v)) } ms := t.maxStreams t.maxStreams = int(v) t.mu.Unlock() if reset { t.streamsQuota.reset(int(v) - ms) } case http2.SettingInitialWindowSize: t.mu.Lock() for _, s := range t.activeStreams { // Adjust the sending quota for each s. s.sendQuotaPool.reset(int(v - t.streamSendQuota)) } t.streamSendQuota = v t.mu.Unlock() } } return nil }) t.controlBuf.put(&settings{ack: true}) } func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(&ping{true}) } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // TODO(zhaoq): GoAwayFrame handler to be implemented" } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { id := f.Header().StreamID incr := f.Increment if id == 0 { t.sendQuotaPool.add(int(incr)) return } if s, ok := t.getStream(f); ok { s.sendQuotaPool.add(int(incr)) } } // operateHeader takes action on the decoded headers. It returns the current // stream if there are remaining headers on the wire (in the following // Continuation frame). func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) { defer func() { if pendingStream == nil { hDec.state = decodeState{} } }() endHeaders, err := hDec.decodeClientHTTP2Headers(frame) if s == nil { // s has been closed. return nil } if err != nil { s.write(recvMsg{err: err}) // Something wrong. Stops reading even when there is remaining. 
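handleSettings above resizes the concurrent-streams quota with quotaPool.reset; the arithmetic is easier to see in isolation (a sketch, written as if in package transport, with made-up limits):

func exampleStreamsQuotaResize() {
	q := newQuotaPool(100) // the peer initially advertises MaxConcurrentStreams = 100
	// The peer later lowers the limit to 50: fold any pending quota back in and
	// apply the delta, exactly as handleSettings does with reset(int(v) - ms).
	q.reset(50 - 100)
	got := <-q.acquire() // an acquirer now sees 50
	_ = got
}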
return nil } if !endHeaders { return s } s.mu.Lock() if !s.headerDone { if !endStream && len(hDec.state.mdata) > 0 { s.header = hDec.state.mdata } close(s.headerChan) s.headerDone = true } if !endStream || s.state == streamDone { s.mu.Unlock() return nil } if len(hDec.state.mdata) > 0 { s.trailer = hDec.state.mdata } s.state = streamDone s.statusCode = hDec.state.statusCode s.statusDesc = hDec.state.statusDesc s.mu.Unlock() s.write(recvMsg{err: io.EOF}) return nil } // reader runs as a separate goroutine in charge of reading data from network // connection. // // TODO(zhaoq): currently one reader per transport. Investigate whether this is // optimal. // TODO(zhaoq): Check the validity of the incoming frame sequence. func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.readFrame() if err != nil { t.notifyError(err) return } sf, ok := frame.(*http2.SettingsFrame) if !ok { t.notifyError(err) return } t.handleSettings(sf) hDec := newHPACKDecoder() var curStream *Stream // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.readFrame() if err != nil { t.notifyError(err) return } switch frame := frame.(type) { case *http2.HeadersFrame: // operateHeaders has to be invoked regardless the value of curStream // because the HPACK decoder needs to be updated using the received // headers. curStream, _ = t.getStream(frame) endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) curStream = t.operateHeaders(hDec, curStream, frame, endStream) case *http2.ContinuationFrame: curStream = t.operateHeaders(hDec, curStream, frame, false) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: t.handleSettings(frame) case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) } } } // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. func (t *http2Client) controller() { for { select { case i := <-t.controlBuf.get(): t.controlBuf.load() select { case <-t.writableChan: switch i := i.(type) { case *windowUpdate: t.framer.writeWindowUpdate(true, i.streamID, i.increment) case *settings: if i.ack { t.framer.writeSettingsAck(true) } else { t.framer.writeSettings(true, i.setting...) } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) case *flushIO: t.framer.flushWrite() case *ping: // TODO(zhaoq): Ack with all-0 data now. will change to some // meaningful content when this is actually in use. t.framer.writePing(true, i.ack, [8]byte{}) default: grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) } t.writableChan <- 0 continue case <-t.shutdownChan: return } case <-t.shutdownChan: return } } } func (t *http2Client) Error() <-chan struct{} { return t.errorChan } func (t *http2Client) notifyError(err error) { t.mu.Lock() defer t.mu.Unlock() // make sure t.errorChan is closed only once. 
if t.state == reachable { t.state = unreachable close(t.errorChan) grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) } } ================================================ FILE: vendor/google.golang.org/grpc/transport/http2_server.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package transport import ( "bytes" "errors" "io" "math" "net" "strconv" "sync" "github.com/bradfitz/http2" "github.com/bradfitz/http2/hpack" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { conn net.Conn maxStreamID uint32 // max stream ID ever seen // writableChan synchronizes write access to the transport. // A writer acquires the write lock by sending a value on writableChan // and releases it by receiving from writableChan. writableChan chan int // shutdownChan is closed when Close is called. // Blocking operations should select on shutdownChan to avoid // blocking forever after Close. shutdownChan chan struct{} framer *framer hBuf *bytes.Buffer // the buffer for HPACK encoding hEnc *hpack.Encoder // HPACK encoder // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. controlBuf *recvBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool mu sync.Mutex // guard the following state transportState activeStreams map[uint32]*Stream // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 } // newHTTP2Server constructs a ServerTransport based on HTTP2. 
ConnectionError is // returned if something goes wrong. func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err error) { framer := newFramer(conn) // Send initial settings as connection preface to client. // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. var settings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. if maxStreams == 0 { maxStreams = math.MaxUint32 } else { settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) } if initialWindowSize != defaultWindowSize { settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) } if err := framer.writeSettings(true, settings...); err != nil { return nil, ConnectionErrorf("transport: %v", err) } // Adjust the connection flow control window if needed. if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { if err := framer.writeWindowUpdate(true, 0, delta); err != nil { return nil, ConnectionErrorf("transport: %v", err) } } var buf bytes.Buffer t := &http2Server{ conn: conn, framer: framer, hBuf: &buf, hEnc: hpack.NewEncoder(&buf), maxStreams: maxStreams, controlBuf: newRecvBuffer(), fc: &inFlow{limit: initialConnWindowSize}, sendQuotaPool: newQuotaPool(defaultWindowSize), state: reachable, writableChan: make(chan int, 1), shutdownChan: make(chan struct{}), activeStreams: make(map[uint32]*Stream), streamSendQuota: defaultWindowSize, } go t.controller() t.writableChan <- 0 return t, nil } // operateHeader takes action on the decoded headers. It returns the current // stream if there are remaining headers on the wire (in the following // Continuation frame). func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) { defer func() { if pendingStream == nil { hDec.state = decodeState{} } }() endHeaders, err := hDec.decodeServerHTTP2Headers(frame) if s == nil { // s has been closed. return nil } if err != nil { grpclog.Printf("transport: http2Server.operateHeader found %v", err) if se, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } return nil } if endStream { // s is just created by the caller. No lock needed. s.state = streamReadDone } if !endHeaders { return s } t.mu.Lock() if t.state != reachable { t.mu.Unlock() return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) return nil } s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) t.activeStreams[s.id] = s t.mu.Unlock() s.windowHandler = func(n int) { t.updateWindow(s, uint32(n)) } if hDec.state.timeoutSet { s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) } else { s.ctx, s.cancel = context.WithCancel(context.TODO()) } // Cache the current stream to the context so that the server application // can find out. Required when the server wants to send some metadata // back to the client (unary call only). s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. if len(hDec.state.mdata) > 0 { s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) } s.dec = &recvBufferReader{ ctx: s.ctx, recv: s.buf, } s.method = hDec.state.method wg.Add(1) go func() { handle(s) wg.Done() }() return nil } // HandleStreams receives incoming streams using the given handler. 
This is // typically run in a separate goroutine. func (t *http2Server) HandleStreams(handle func(*Stream)) { // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) t.Close() return } if !bytes.Equal(preface, clientPreface) { grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) t.Close() return } frame, err := t.framer.readFrame() if err != nil { grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } sf, ok := frame.(*http2.SettingsFrame) if !ok { grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) t.Close() return } t.handleSettings(sf) hDec := newHPACKDecoder() var curStream *Stream var wg sync.WaitGroup defer wg.Wait() for { frame, err := t.framer.readFrame() if err != nil { t.Close() return } switch frame := frame.(type) { case *http2.HeadersFrame: id := frame.Header().StreamID if id%2 != 1 || id <= t.maxStreamID { // illegal gRPC stream id. grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) t.Close() break } t.maxStreamID = id buf := newRecvBuffer() fc := &inFlow{ limit: initialWindowSize, conn: t.fc, } curStream = &Stream{ id: frame.Header().StreamID, st: t, buf: buf, fc: fc, } endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle, &wg) case *http2.ContinuationFrame: curStream = t.operateHeaders(hDec, curStream, frame, false, handle, &wg) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: t.handleSettings(frame) case *http2.PingFrame: t.handlePing(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) case *http2.GoAwayFrame: break default: grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { // The transport is closing. return nil, false } s, ok := t.activeStreams[f.Header().StreamID] if !ok { // The stream is already done. return nil, false } return s, true } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Server) updateWindow(s *Stream, n uint32) { swu, cwu := s.fc.onRead(n) if swu > 0 { t.controlBuf.put(&windowUpdate{s.id, swu}) } if cwu > 0 { t.controlBuf.put(&windowUpdate{0, cwu}) } } func (t *http2Server) handleData(f *http2.DataFrame) { // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { return } size := len(f.Data()) if err := s.fc.onData(uint32(size)); err != nil { if _, ok := err.(ConnectionError); ok { grpclog.Printf("transport: http2Server %v", err) t.Close() return } t.closeStream(s) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? 
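Putting the server half together, this is roughly how an accept loop would feed connections into this transport (an illustrative, self-contained sketch, not code from this repository; the listen address, stream limit and handler are made up):

package main

import (
	"net"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/transport"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051") // hypothetical address
	if err != nil {
		return
	}
	for {
		conn, err := lis.Accept()
		if err != nil {
			return
		}
		st, err := transport.NewServerTransport("http2", conn, 100) // allow at most 100 concurrent streams
		if err != nil {
			conn.Close()
			continue
		}
		// HandleStreams blocks reading frames, so run it per connection.
		go st.HandleStreams(func(s *transport.Stream) {
			// A trivial handler: acknowledge every stream with an OK status.
			st.WriteStatus(s, codes.OK, "")
		})
	}
}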
data := make([]byte, size) copy(data, f.Data()) s.write(recvMsg{data: data}) if f.Header().Flags.Has(http2.FlagDataEndStream) { // Received the end of stream from the client. s.mu.Lock() if s.state != streamDone { if s.state == streamWriteDone { s.state = streamDone } else { s.state = streamReadDone } } s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } } func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { s, ok := t.getStream(f) if !ok { return } t.closeStream(s) } func (t *http2Server) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } f.ForeachSetting(func(s http2.Setting) error { if v, ok := f.Value(http2.SettingInitialWindowSize); ok { t.mu.Lock() defer t.mu.Unlock() for _, s := range t.activeStreams { s.sendQuotaPool.reset(int(v - t.streamSendQuota)) } t.streamSendQuota = v } return nil }) t.controlBuf.put(&settings{ack: true}) } func (t *http2Server) handlePing(f *http2.PingFrame) { t.controlBuf.put(&ping{true}) } func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { id := f.Header().StreamID incr := f.Increment if id == 0 { t.sendQuotaPool.add(int(incr)) return } if s, ok := t.getStream(f); ok { s.sendQuotaPool.add(int(incr)) } } func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { first := true endHeaders := false var err error // Sends the headers in a single batch. for !endHeaders { size := t.hBuf.Len() if size > http2MaxFrameLen { size = http2MaxFrameLen } else { endHeaders = true } if first { p := http2.HeadersFrameParam{ StreamID: s.id, BlockFragment: b.Next(size), EndStream: endStream, EndHeaders: endHeaders, } err = t.framer.writeHeaders(endHeaders, p) first = false } else { err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) } if err != nil { t.Close() return ConnectionErrorf("transport: %v", err) } } return nil } // WriteHeader sends the header metedata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { s.mu.Lock() if s.headerOk || s.state == streamDone { s.mu.Unlock() return ErrIllegalHeaderWrite } s.headerOk = true s.mu.Unlock() if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) for k, v := range md { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } if err := t.writeHeaders(s, t.hBuf, false); err != nil { return err } t.writableChan <- 0 return nil } // WriteStatus sends stream status to the client and terminates the stream. // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { s.mu.RLock() if s.state == streamDone { s.mu.RUnlock() return nil } s.mu.RUnlock() if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField( hpack.HeaderField{ Name: "grpc-status", Value: strconv.Itoa(int(statusCode)), }) t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) // Attach the trailer metadata. 
for k, v := range s.trailer { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } if err := t.writeHeaders(s, t.hBuf, true); err != nil { t.Close() return err } t.closeStream(s) t.writableChan <- 0 return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { // TODO(zhaoq): Support multi-writers for a single stream. var writeHeaderFrame bool s.mu.Lock() if !s.headerOk { writeHeaderFrame = true s.headerOk = true } s.mu.Unlock() if writeHeaderFrame { if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) p := http2.HeadersFrameParam{ StreamID: s.id, BlockFragment: t.hBuf.Bytes(), EndHeaders: true, } if err := t.framer.writeHeaders(false, p); err != nil { t.Close() return ConnectionErrorf("transport: %v", err) } t.writableChan <- 0 } r := bytes.NewBuffer(data) for { if r.Len() == 0 { return nil } size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { if _, ok := err.(StreamError); ok { t.sendQuotaPool.cancel() } return err } if sq < size { size = sq } if tq < size { size = tq } p := r.Next(size) ps := len(p) if ps < sq { // Overbooked stream quota. Return it back. s.sendQuotaPool.add(sq - ps) } if ps < tq { // Overbooked transport quota. Return it back. t.sendQuotaPool.add(tq - ps) } t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the // transport. if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues // a flush request to controlBuf instead of flushing directly // in order to avoid the race with other writing or flushing. t.controlBuf.put(&flushIO{}) } return err } var forceFlush bool if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { forceFlush = true } if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { t.Close() return ConnectionErrorf("transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() } t.writableChan <- 0 } } // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. func (t *http2Server) controller() { for { select { case i := <-t.controlBuf.get(): t.controlBuf.load() select { case <-t.writableChan: switch i := i.(type) { case *windowUpdate: t.framer.writeWindowUpdate(true, i.streamID, i.increment) case *settings: if i.ack { t.framer.writeSettingsAck(true) } else { t.framer.writeSettings(true, i.setting...) } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) case *flushIO: t.framer.flushWrite() case *ping: // TODO(zhaoq): Ack with all-0 data now. will change to some // meaningful content when this is actually in use. 
t.framer.writePing(true, i.ack, [8]byte{}) default: grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) } t.writableChan <- 0 continue case <-t.shutdownChan: return } case <-t.shutdownChan: return } } } // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. func (t *http2Server) Close() (err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return errors.New("transport: Close() was already called") } t.state = closing streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() close(t.shutdownChan) err = t.conn.Close() // Notify all active streams. for _, s := range streams { s.write(recvMsg{err: ErrConnClosing}) } return } // closeStream clears the footprint of a stream when the stream is not needed // any more. func (t *http2Server) closeStream(s *Stream) { t.mu.Lock() delete(t.activeStreams, s.id) t.mu.Unlock() if q := s.fc.restoreConn(); q > 0 { t.controlBuf.put(&windowUpdate{0, q}) } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() return } s.state = streamDone s.mu.Unlock() // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), the caller needs // to call cancel on the stream to interrupt the blocking on // other goroutines. s.cancel() } ================================================ FILE: vendor/google.golang.org/grpc/transport/http_util.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package transport import ( "bufio" "fmt" "io" "net" "strconv" "sync/atomic" "time" "github.com/bradfitz/http2" "github.com/bradfitz/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. 
http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 // http2IOBufSize specifies the buffer size for sending frames. http2IOBufSize = 32 * 1024 ) var ( clientPreface = []byte(http2.ClientPreface) ) var http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ http2.ErrCodeNo: codes.Internal, http2.ErrCodeProtocol: codes.Internal, http2.ErrCodeInternal: codes.Internal, http2.ErrCodeFlowControl: codes.Internal, http2.ErrCodeSettingsTimeout: codes.Internal, http2.ErrCodeFrameSize: codes.Internal, http2.ErrCodeRefusedStream: codes.Unavailable, http2.ErrCodeCancel: codes.Canceled, http2.ErrCodeCompression: codes.Internal, http2.ErrCodeConnect: codes.Internal, http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, http2.ErrCodeInadequateSecurity: codes.PermissionDenied, } var statusCodeConvTab = map[codes.Code]http2.ErrCode{ codes.Internal: http2.ErrCodeInternal, // pick an arbitrary one which is matched. codes.Canceled: http2.ErrCodeCancel, codes.Unavailable: http2.ErrCodeRefusedStream, codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { // statusCode caches the stream status received from the trailer // the server sent. Client side only. statusCode codes.Code statusDesc string // Server side only fields. timeoutSet bool timeout time.Duration method string // key-value metadata map from the peer. mdata map[string]string } // An hpackDecoder decodes HTTP2 headers which may span multiple frames. type hpackDecoder struct { h *hpack.Decoder state decodeState err error // The err when decoding } // A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. type headerFrame interface { Header() http2.FrameHeader HeaderBlockFragment() []byte HeadersEnded() bool } // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. 
func isReservedHeader(hdr string) bool { if hdr[0] == ':' { return true } switch hdr { case "content-type", "grpc-message-type", "grpc-encoding", "grpc-message", "grpc-status", "grpc-timeout", "te", "user-agent": return true default: return false } } func newHPACKDecoder() *hpackDecoder { d := &hpackDecoder{} d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { switch f.Name { case "grpc-status": code, err := strconv.Atoi(f.Value) if err != nil { d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) return } d.state.statusCode = codes.Code(code) case "grpc-message": d.state.statusDesc = f.Value case "grpc-timeout": d.state.timeoutSet = true var err error d.state.timeout, err = timeoutDecode(f.Value) if err != nil { d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) return } case ":path": d.state.method = f.Value default: if !isReservedHeader(f.Name) { if d.state.mdata == nil { d.state.mdata = make(map[string]string) } k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) if err != nil { grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) return } d.state.mdata[k] = v } } }) return d } func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { d.err = nil _, err = d.h.Write(frame.HeaderBlockFragment()) if err != nil { err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) } if frame.HeadersEnded() { if closeErr := d.h.Close(); closeErr != nil && err == nil { err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) } endHeaders = true } if err == nil && d.err != nil { err = d.err } return } func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { d.err = nil _, err = d.h.Write(frame.HeaderBlockFragment()) if err != nil { err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) } if frame.HeadersEnded() { if closeErr := d.h.Close(); closeErr != nil && err == nil { err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) } endHeaders = true } if err == nil && d.err != nil { err = d.err } return } type timeoutUnit uint8 const ( hour timeoutUnit = 'H' minute timeoutUnit = 'M' second timeoutUnit = 'S' millisecond timeoutUnit = 'm' microsecond timeoutUnit = 'u' nanosecond timeoutUnit = 'n' ) func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { switch u { case hour: return time.Hour, true case minute: return time.Minute, true case second: return time.Second, true case millisecond: return time.Millisecond, true case microsecond: return time.Microsecond, true case nanosecond: return time.Nanosecond, true default: } return } const maxTimeoutValue int64 = 100000000 - 1 // div does integer division and round-up the result. Note that this is // equivalent to (d+r-1)/r but has less chance to overflow. func div(d, r time.Duration) int64 { if m := d % r; m > 0 { return int64(d/r + 1) } return int64(d / r) } // TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. 
func timeoutEncode(t time.Duration) string { if d := div(t, time.Nanosecond); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "n" } if d := div(t, time.Microsecond); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "u" } if d := div(t, time.Millisecond); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "m" } if d := div(t, time.Second); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "S" } if d := div(t, time.Minute); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "M" } // Note that maxTimeoutValue * time.Hour > MaxInt64. return strconv.FormatInt(div(t, time.Hour), 10) + "H" } func timeoutDecode(s string) (time.Duration, error) { size := len(s) if size < 2 { return 0, fmt.Errorf("transport: timeout string is too short: %q", s) } unit := timeoutUnit(s[size-1]) d, ok := timeoutUnitToDuration(unit) if !ok { return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) } t, err := strconv.ParseInt(s[:size-1], 10, 64) if err != nil { return 0, err } return d * time.Duration(t), nil } type framer struct { numWriters int32 reader io.Reader writer *bufio.Writer fr *http2.Framer } func newFramer(conn net.Conn) *framer { f := &framer{ reader: conn, writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) return f } func (f *framer) adjustNumWriters(i int32) int32 { return atomic.AddInt32(&f.numWriters, i) } // The following writeXXX functions can only be called when the caller gets // unblocked from writableChan channel (i.e., owns the privilege to write). func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { if err := f.fr.WriteData(streamID, endStream, data); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { if err := f.fr.WriteHeaders(p); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { if err := f.fr.WritePing(ack, data); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { if err := f.fr.WritePriority(streamID, p); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { if err := f.fr.WritePushPromise(p); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { if err := f.fr.WriteRSTStream(streamID, code); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { if err := f.fr.WriteSettings(settings...); err != nil { return err } if forceFlush { return f.writer.Flush() } 
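The grpc-timeout helpers above pick the coarsest unit whose value still fits within maxTimeoutValue; a quick round trip shows the encoding (a sketch, written as if it sat in package transport, relying on the existing "time" import):

func exampleTimeoutRoundTrip() {
	s := timeoutEncode(time.Second) // "1000000u": one second does not fit as nanoseconds, so microseconds are used
	d, err := timeoutDecode(s)      // d == time.Second, err == nil
	_, _ = d, err
}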
return nil } func (f *framer) writeSettingsAck(forceFlush bool) error { if err := f.fr.WriteSettingsAck(); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { return err } if forceFlush { return f.writer.Flush() } return nil } func (f *framer) flushWrite() error { return f.writer.Flush() } func (f *framer) readFrame() (http2.Frame, error) { return f.fr.ReadFrame() } ================================================ FILE: vendor/google.golang.org/grpc/transport/transport.go ================================================ /* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* Package transport defines and implements message oriented communication channel to complete various transactions (e.g., an RPC). */ package transport import ( "bytes" "errors" "fmt" "io" "net" "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" ) // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { data []byte // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. err error } func (recvMsg) isItem() bool { return true } // All items in an out of a recvBuffer should be the same type. type item interface { isItem() bool } // recvBuffer is an unbounded channel of item. 
type recvBuffer struct { c chan item mu sync.Mutex backlog []item } func newRecvBuffer() *recvBuffer { b := &recvBuffer{ c: make(chan item, 1), } return b } func (b *recvBuffer) put(r item) { b.mu.Lock() defer b.mu.Unlock() b.backlog = append(b.backlog, r) select { case b.c <- b.backlog[0]: b.backlog = b.backlog[1:] default: } } func (b *recvBuffer) load() { b.mu.Lock() defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: b.backlog = b.backlog[1:] default: } } } // get returns the channel that receives an item in the buffer. // // Upon receipt of an item, the caller should call load to send another // item onto the channel if there is any. func (b *recvBuffer) get() <-chan item { return b.c } // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { ctx context.Context recv *recvBuffer last *bytes.Reader // Stores the remaining data in the previous calls. err error } // Read reads the next len(p) bytes from last. If last is drained, it tries to // read additional data from recv. It blocks if there no additional data available // in recv. If Read returns any non-nil error, it will continue to return that error. func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } defer func() { r.err = err }() if r.last != nil && r.last.Len() > 0 { // Read remaining data left in last call. return r.last.Read(p) } select { case <-r.ctx.Done(): return 0, ContextErr(r.ctx.Err()) case i := <-r.recv.get(): r.recv.load() m := i.(*recvMsg) if m.err != nil { return 0, m.err } r.last = bytes.NewReader(m.data) return r.last.Read(p) } } type streamState uint8 const ( streamActive streamState = iota streamWriteDone // EndStream sent streamReadDone // EndStream received streamDone // sendDone and recvDone or RSTStreamFrame is sent or received. ) // Stream represents an RPC in the transport layer. type Stream struct { id uint32 // nil for client side Stream. st ServerTransport // ctx is the associated context of the stream. ctx context.Context cancel context.CancelFunc // method records the associated RPC method of the stream. method string buf *recvBuffer dec io.Reader // updateStreams indicates whether the transport's streamsQuota needed // to be updated when this stream is closed. It is false when the transport // sticks to the initial infinite value of the number of concurrent streams. // Ture otherwise. updateStreams bool fc *inFlow recvQuota uint32 // The accumulated inbound quota pending for window update. updateQuota uint32 // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) sendQuotaPool *quotaPool // Close headerChan to indicate the end of reception of header metadata. headerChan chan struct{} // header caches the received header metadata. header metadata.MD // The key-value map of trailer metadata. trailer metadata.MD mu sync.RWMutex // guard the following // headerOK becomes true from the first header is about to send. headerOk bool state streamState // true iff headerChan is closed. Used to avoid closing headerChan // multiple times. headerDone bool // the status received from the server. statusCode codes.Code statusDesc string } // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no // header metadata or iii) the stream is cancelled/expired. 
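The put/get/load contract of recvBuffer is easiest to see with a tiny driver (a sketch, written as if in package transport; the payload is made up):

func exampleRecvBufferUsage() {
	b := newRecvBuffer()
	b.put(&recvMsg{data: []byte("hello")}) // queued and forwarded to the channel, which was empty
	i := <-b.get()                         // receive the buffered item
	b.load()                               // per the contract above: move the next backlogged item, if any, onto the channel
	if m, ok := i.(*recvMsg); ok {
		_ = m.data
	}
}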
type streamState uint8

const (
	streamActive    streamState = iota
	streamWriteDone // EndStream sent
	streamReadDone  // EndStream received
	streamDone      // sendDone and recvDone, or RSTStreamFrame is sent or received.
)

// Stream represents an RPC in the transport layer.
type Stream struct {
	id uint32
	// nil for client side Stream.
	st ServerTransport
	// ctx is the associated context of the stream.
	ctx    context.Context
	cancel context.CancelFunc
	// method records the associated RPC method of the stream.
	method string
	buf    *recvBuffer
	dec    io.Reader
	// updateStreams indicates whether the transport's streamsQuota needs
	// to be updated when this stream is closed. It is false when the
	// transport sticks to the initial infinite value of the number of
	// concurrent streams. True otherwise.
	updateStreams bool
	fc            *inFlow
	recvQuota     uint32
	// The accumulated inbound quota pending for window update.
	updateQuota uint32
	// The handler to control the window update procedure for both this
	// particular stream and the associated transport.
	windowHandler func(int)
	sendQuotaPool *quotaPool
	// Close headerChan to indicate the end of reception of header metadata.
	headerChan chan struct{}
	// header caches the received header metadata.
	header metadata.MD
	// The key-value map of trailer metadata.
	trailer metadata.MD

	mu sync.RWMutex // guards the following
	// headerOk becomes true when the first header is about to be sent.
	headerOk bool
	state    streamState
	// true iff headerChan is closed. Used to avoid closing headerChan
	// multiple times.
	headerDone bool
	// the status received from the server.
	statusCode codes.Code
	statusDesc string
}

// Header acquires the key-value pairs of header metadata once they are
// available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is cancelled/expired.
func (s *Stream) Header() (metadata.MD, error) {
	select {
	case <-s.ctx.Done():
		return nil, ContextErr(s.ctx.Err())
	case <-s.headerChan:
		return s.header.Copy(), nil
	}
}

// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
func (s *Stream) Trailer() metadata.MD {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.trailer.Copy()
}

// ServerTransport returns the underlying ServerTransport for the stream.
// The client side stream always returns nil.
func (s *Stream) ServerTransport() ServerTransport {
	return s.st
}

// Context returns the context of the stream.
func (s *Stream) Context() context.Context {
	return s.ctx
}

// Method returns the method for the stream.
func (s *Stream) Method() string {
	return s.method
}

// StatusCode returns the statusCode received from the server.
func (s *Stream) StatusCode() codes.Code {
	return s.statusCode
}

// StatusDesc returns the statusDesc received from the server.
func (s *Stream) StatusDesc() string {
	return s.statusDesc
}

// ErrIllegalTrailerSet indicates that the trailer has already been set or it
// is too late to do so.
var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")

// SetTrailer sets the trailer metadata which will be sent with the RPC status
// by the server. This can be called at most once. Server side only.
func (s *Stream) SetTrailer(md metadata.MD) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.trailer != nil {
		return ErrIllegalTrailerSet
	}
	s.trailer = md.Copy()
	return nil
}

func (s *Stream) write(m recvMsg) {
	s.buf.put(&m)
}

// Read reads all the data available for this Stream from the transport and
// passes it into the decoder, which converts it into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
func (s *Stream) Read(p []byte) (n int, err error) {
	n, err = s.dec.Read(p)
	if err != nil {
		return
	}
	s.windowHandler(n)
	return
}

type key int

// The key to save transport.Stream in the context.
const streamKey = key(0)

// newContextWithStream creates a new context from ctx and attaches stream
// to it.
func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
	return context.WithValue(ctx, streamKey, stream)
}

// StreamFromContext returns the stream saved in ctx.
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
	s, ok = ctx.Value(streamKey).(*Stream)
	return
}
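// Illustrative sketch, not part of the upstream gRPC source (the function name
// is hypothetical). Server-side handlers can recover the transport.Stream that
// newContextWithStream attached to a request context, e.g. to inspect the RPC
// method.
func exampleStreamFromContext(ctx context.Context) string {
	if s, ok := StreamFromContext(ctx); ok {
		return s.Method()
	}
	return ""
}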
// state of transport
type transportState int

const (
	reachable transportState = iota
	unreachable
	closing
)

// NewServerTransport creates a ServerTransport with conn, or returns a non-nil
// error if it fails.
func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32) (ServerTransport, error) {
	return newHTTP2Server(conn, maxStreams)
}

// ConnectOptions covers all relevant options for dialing a server.
type ConnectOptions struct {
	Dialer      func(string, time.Duration) (net.Conn, error)
	AuthOptions []credentials.Credentials
	Timeout     time.Duration
}

// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) {
	return newHTTP2Client(target, opts)
}

// Options provides additional hints and information for message
// transmission.
type Options struct {
	// Last indicates whether this is the last piece for this stream.
	Last bool
	// Delay is a hint to the transport implementation that the data could be
	// buffered for a batching write. The transport implementation is free to
	// ignore it.
	Delay bool
}

// CallHdr carries the information of a particular RPC.
type CallHdr struct {
	Host   string // peer host
	Method string // the operation to perform on the specified host
}

// ClientTransport is the common interface for all gRPC client side transport
// implementations.
type ClientTransport interface {
	// Close tears down this transport. Once it returns, the transport
	// should not be accessed any more. The caller must make sure this
	// is called only once.
	Close() error

	// Write sends the data for the given stream. A nil stream indicates
	// the write is to be performed on the transport as a whole.
	Write(s *Stream, data []byte, opts *Options) error

	// NewStream creates a Stream for an RPC.
	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)

	// CloseStream clears the footprint of a stream when the stream is
	// not needed any more. The err indicates the error incurred when
	// CloseStream is called. Must be called when a stream is finished
	// unless the associated transport is closing.
	CloseStream(stream *Stream, err error)

	// Error returns a channel that is closed when some I/O error
	// happens. Typically the caller should have a goroutine to monitor
	// this in order to take action (e.g., close the current transport
	// and create a new one) in the error case. It should not return nil
	// once the transport is initiated.
	Error() <-chan struct{}
}

// ServerTransport is the common interface for all gRPC server side transport
// implementations.
type ServerTransport interface {
	// WriteStatus sends the status of a stream to the client.
	WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
	// Write sends the data for the given stream.
	Write(s *Stream, data []byte, opts *Options) error
	// WriteHeader sends the header metadata for the given stream.
	WriteHeader(s *Stream, md metadata.MD) error
	// HandleStreams receives incoming streams using the given handler.
	HandleStreams(func(*Stream))
	// Close tears down the transport. Once it is called, the transport
	// should not be accessed any more. All the pending streams and their
	// handlers will be terminated asynchronously.
	Close() error
}

// StreamErrorf creates a StreamError with the specified error code and description.
func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
	return StreamError{
		Code: c,
		Desc: fmt.Sprintf(format, a...),
	}
}

// ConnectionErrorf creates a ConnectionError with the specified error description.
func ConnectionErrorf(format string, a ...interface{}) ConnectionError {
	return ConnectionError{
		Desc: fmt.Sprintf(format, a...),
	}
}

// ConnectionError is an error that results in the termination of the
// entire connection and the retry of all the active streams.
type ConnectionError struct {
	Desc string
}

func (e ConnectionError) Error() string {
	return fmt.Sprintf("connection error: desc = %q", e.Desc)
}

// Define some common ConnectionErrors.
var ErrConnClosing = ConnectionError{Desc: "transport is closing"}

// StreamError is an error that only affects one stream within a connection.
type StreamError struct {
	Code codes.Code
	Desc string
}

func (e StreamError) Error() string {
	return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
}
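// Illustrative sketch, not part of the upstream gRPC source (the function name
// is hypothetical). Callers typically distinguish stream-level failures, which
// fail only the one RPC, from connection-level failures, which require tearing
// down the transport and retrying the active streams, with a type switch.
func exampleClassifyTransportError(err error) string {
	switch e := err.(type) {
	case StreamError:
		return fmt.Sprintf("stream error, code %d: %s", e.Code, e.Desc)
	case ConnectionError:
		return fmt.Sprintf("connection error, reconnect needed: %s", e.Desc)
	default:
		return err.Error()
	}
}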
// ContextErr converts the error from the context package into a StreamError.
func ContextErr(err error) StreamError {
	switch err {
	case context.DeadlineExceeded:
		return StreamErrorf(codes.DeadlineExceeded, "%v", err)
	case context.Canceled:
		return StreamErrorf(codes.Canceled, "%v", err)
	}
	panic(fmt.Sprintf("Unexpected error from context package: %v", err))
}

// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ContextErr(ctx.Err())
	case <-closing:
		return 0, ErrConnClosing
	case i := <-proceed:
		return i, nil
	}
}
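// Illustrative sketch, not part of the upstream gRPC source (the function name
// is hypothetical). wait multiplexes three signals (context cancellation,
// transport shutdown, and an integer "proceed" value such as a granted
// flow-control quota) and returns whichever fires first.
func exampleWaitUsage(ctx context.Context) (int, error) {
	closing := make(chan struct{}) // would be closed when the transport shuts down
	proceed := make(chan int, 1)   // delivers e.g. a granted send quota
	proceed <- 64                  // pretend 64 bytes of quota became available
	return wait(ctx, closing, proceed)
}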